gcc/omp-low.c
1 /* Lowering pass for OMP directives. Converts OMP directives into explicit
2 calls to the runtime library (libgomp), data marshalling to implement data
3 sharing and copying clauses, offloading to accelerators, and more.
5 Contributed by Diego Novillo <dnovillo@redhat.com>
7 Copyright (C) 2005-2015 Free Software Foundation, Inc.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "hash-set.h"
30 #include "machmode.h"
31 #include "vec.h"
32 #include "double-int.h"
33 #include "input.h"
34 #include "alias.h"
35 #include "symtab.h"
36 #include "wide-int.h"
37 #include "inchash.h"
38 #include "tree.h"
39 #include "fold-const.h"
40 #include "stringpool.h"
41 #include "stor-layout.h"
42 #include "rtl.h"
43 #include "predict.h"
44 #include "hard-reg-set.h"
45 #include "function.h"
46 #include "dominance.h"
47 #include "cfg.h"
48 #include "cfganal.h"
49 #include "basic-block.h"
50 #include "tree-ssa-alias.h"
51 #include "internal-fn.h"
52 #include "gimple-fold.h"
53 #include "gimple-expr.h"
54 #include "is-a.h"
55 #include "gimple.h"
56 #include "gimplify.h"
57 #include "gimple-iterator.h"
58 #include "gimplify-me.h"
59 #include "gimple-walk.h"
60 #include "tree-iterator.h"
61 #include "tree-inline.h"
62 #include "langhooks.h"
63 #include "diagnostic-core.h"
64 #include "gimple-ssa.h"
65 #include "hash-map.h"
66 #include "plugin-api.h"
67 #include "ipa-ref.h"
68 #include "cgraph.h"
69 #include "tree-cfg.h"
70 #include "tree-phinodes.h"
71 #include "ssa-iterators.h"
72 #include "tree-ssanames.h"
73 #include "tree-into-ssa.h"
74 #include "hashtab.h"
75 #include "flags.h"
76 #include "statistics.h"
77 #include "real.h"
78 #include "fixed-value.h"
79 #include "insn-config.h"
80 #include "expmed.h"
81 #include "dojump.h"
82 #include "explow.h"
83 #include "calls.h"
84 #include "emit-rtl.h"
85 #include "varasm.h"
86 #include "stmt.h"
87 #include "expr.h"
88 #include "tree-dfa.h"
89 #include "tree-ssa.h"
90 #include "tree-pass.h"
91 #include "except.h"
92 #include "splay-tree.h"
93 #include "insn-codes.h"
94 #include "optabs.h"
95 #include "cfgloop.h"
96 #include "target.h"
97 #include "common/common-target.h"
98 #include "omp-low.h"
99 #include "gimple-low.h"
100 #include "tree-cfgcleanup.h"
101 #include "pretty-print.h"
102 #include "alloc-pool.h"
103 #include "symbol-summary.h"
104 #include "ipa-prop.h"
105 #include "tree-nested.h"
106 #include "tree-eh.h"
107 #include "cilk.h"
108 #include "context.h"
109 #include "lto-section-names.h"
110 #include "gomp-constants.h"
113 /* Lowering of OMP parallel and workshare constructs proceeds in two
114 phases. The first phase scans the function looking for OMP statements
115 and then for variables that must be replaced to satisfy data sharing
116 clauses. The second phase expands code for the constructs, as well as
117 re-gimplifying things when variables have been replaced with complex
118 expressions.
120 Final code generation is done by pass_expand_omp. The flowgraph is
121 scanned for regions which are then moved to a new
122 function, to be invoked by the thread library, or offloaded. */
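/* For illustration (a sketch of the result, not the exact GIMPLE
   produced), a simple parallel region such as

       #pragma omp parallel shared (n)
         n++;

   is outlined into a child function and replaced by a call into
   libgomp, conceptually:

       static void foo._omp_fn.0 (struct .omp_data_s *data)
       {
         data->n++;
       }
       ...
       .omp_data_o.n = n;
       GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);
       n = .omp_data_o.n;

   The shared variable N travels through the .omp_data_s record built
   by this pass; the child-function and record names here are
   illustrative only. */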
124 /* OMP region information. Every parallel and workshare
125 directive is enclosed between two markers, the OMP_* directive
126 and a corresponding OMP_RETURN statement. */
128 struct omp_region
130 /* The enclosing region. */
131 struct omp_region *outer;
133 /* First child region. */
134 struct omp_region *inner;
136 /* Next peer region. */
137 struct omp_region *next;
139 /* Block containing the omp directive as its last stmt. */
140 basic_block entry;
142 /* Block containing the OMP_RETURN as its last stmt. */
143 basic_block exit;
145 /* Block containing the OMP_CONTINUE as its last stmt. */
146 basic_block cont;
148 /* If this is a combined parallel+workshare region, this is a list
149 of additional arguments needed by the combined parallel+workshare
150 library call. */
151 vec<tree, va_gc> *ws_args;
153 /* The code for the omp directive of this region. */
154 enum gimple_code type;
156 /* Schedule kind, only used for OMP_FOR type regions. */
157 enum omp_clause_schedule_kind sched_kind;
159 /* True if this is a combined parallel+workshare region. */
160 bool is_combined_parallel;
163 /* Levels of parallelism as defined by OpenACC. Increasing numbers
164 correspond to deeper loop nesting levels. */
165 #define MASK_GANG 1
166 #define MASK_WORKER 2
167 #define MASK_VECTOR 4
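/* For example, an OpenACC loop that uses gang and vector parallelism
   but not worker parallelism is described by the mask
   MASK_GANG | MASK_VECTOR, i.e. 1 | 4 == 5; this is how the
   gwv_below/gwv_this fields defined below are combined. */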
169 /* Context structure. Used to store information about each parallel
170 directive in the code. */
172 typedef struct omp_context
174 /* This field must be at the beginning, as we do "inheritance": Some
175 callback functions for tree-inline.c (e.g., omp_copy_decl)
176 receive a copy_body_data pointer that is up-casted to an
177 omp_context pointer. */
178 copy_body_data cb;
180 /* The tree of contexts corresponding to the encountered constructs. */
181 struct omp_context *outer;
182 gimple stmt;
184 /* Map variables to fields in a structure that allows communication
185 between sending and receiving threads. */
186 splay_tree field_map;
187 tree record_type;
188 tree sender_decl;
189 tree receiver_decl;
191 /* These are used just by task contexts, if task firstprivate fn is
192 needed. srecord_type is used to communicate from the thread
193 that encountered the task construct to task firstprivate fn,
194 record_type is allocated by GOMP_task, initialized by task firstprivate
195 fn and passed to the task body fn. */
196 splay_tree sfield_map;
197 tree srecord_type;
199 /* A chain of variables to add to the top-level block surrounding the
200 construct. In the case of a parallel, this is in the child function. */
201 tree block_vars;
203 /* A map of reduction pointer variables. For accelerators, each
204 reduction variable is replaced with an array. Each thread, in turn,
205 is assigned to a slot on that array. */
206 splay_tree reduction_map;
208 /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
209 barriers should jump during the omplower pass. */
210 tree cancel_label;
212 /* What to do with variables with implicitly determined sharing
213 attributes. */
214 enum omp_clause_default_kind default_kind;
216 /* Nesting depth of this context. Used to beautify error messages re
217 invalid gotos. The outermost ctx is depth 1, with depth 0 being
218 reserved for the main body of the function. */
219 int depth;
221 /* True if this parallel directive is nested within another. */
222 bool is_nested;
224 /* True if this construct can be cancelled. */
225 bool cancellable;
227 /* For OpenACC loops, a mask of gang, worker and vector used at
228 levels below this one. */
229 int gwv_below;
230 /* For OpenACC loops, a mask of gang, worker and vector used at
231 this level and above. For parallel and kernels clauses, a mask
232 indicating which of num_gangs/num_workers/num_vectors was used. */
233 int gwv_this;
234 } omp_context;
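/* For example, in

       #pragma omp parallel      <-- ctx A, depth 1
       {
         #pragma omp for         <-- ctx B, depth 2, B->outer == A
         ...
       }

   scanning builds one omp_context per construct, chained through the
   OUTER field, with DEPTH counting the nesting (a sketch; the labels
   A and B are illustrative). */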
236 /* A structure holding the elements of:
237 for (V = N1; V cond N2; V += STEP) [...] */
239 struct omp_for_data_loop
241 tree v, n1, n2, step;
242 enum tree_code cond_code;
245 /* A structure describing the main elements of a parallel loop. */
247 struct omp_for_data
249 struct omp_for_data_loop loop;
250 tree chunk_size;
251 gomp_for *for_stmt;
252 tree pre, iter_type;
253 int collapse;
254 bool have_nowait, have_ordered;
255 enum omp_clause_schedule_kind sched_kind;
256 struct omp_for_data_loop *loops;
260 static splay_tree all_contexts;
261 static int taskreg_nesting_level;
262 static int target_nesting_level;
263 static struct omp_region *root_omp_region;
264 static bitmap task_shared_vars;
265 static vec<omp_context *> taskreg_contexts;
267 static void scan_omp (gimple_seq *, omp_context *);
268 static tree scan_omp_1_op (tree *, int *, void *);
270 #define WALK_SUBSTMTS \
271 case GIMPLE_BIND: \
272 case GIMPLE_TRY: \
273 case GIMPLE_CATCH: \
274 case GIMPLE_EH_FILTER: \
275 case GIMPLE_TRANSACTION: \
276 /* The sub-statements for these should be walked. */ \
277 *handled_ops_p = false; \
278 break;
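/* A sketch of how WALK_SUBSTMTS is meant to be used inside a
   walk_gimple_seq statement callback (the callback name here is
   illustrative):

       static tree
       my_walk_callback (gimple_stmt_iterator *gsi_p,
                         bool *handled_ops_p,
                         struct walk_stmt_info *wi)
       {
         gimple stmt = gsi_stmt (*gsi_p);
         *handled_ops_p = true;
         switch (gimple_code (stmt))
           {
           WALK_SUBSTMTS
           default:
             break;
           }
         return NULL_TREE;
       }
*/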
280 /* Helper function to get the name of the array containing the partial
281 reductions for OpenACC reductions. */
282 static const char *
283 oacc_get_reduction_array_id (tree node)
285 const char *id = IDENTIFIER_POINTER (DECL_NAME (node));
286 int len = strlen ("OACC") + strlen (id);
287 char *temp_name = XALLOCAVEC (char, len + 1);
288 snprintf (temp_name, len + 1, "OACC%s", id);
289 return IDENTIFIER_POINTER (get_identifier (temp_name));
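/* For example, a reduction variable named "sum" yields the array
   identifier "OACCsum". */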
292 /* Determine the number of OpenACC threads, used to determine the
293 size of the array of partial reductions. Currently, this is num_gangs
294 * vector_length. This value may differ from GOACC_GET_NUM_THREADS,
295 because it is independent of the device used. */
297 static tree
298 oacc_max_threads (omp_context *ctx)
300 tree nthreads, vector_length, gangs, clauses;
302 gangs = fold_convert (sizetype, integer_one_node);
303 vector_length = gangs;
305 /* The reduction clause may be nested inside a loop directive.
306 Scan for the innermost vector_length clause. */
307 for (omp_context *oc = ctx; oc; oc = oc->outer)
309 if (gimple_code (oc->stmt) != GIMPLE_OMP_TARGET
310 || (gimple_omp_target_kind (oc->stmt)
311 != GF_OMP_TARGET_KIND_OACC_PARALLEL))
312 continue;
314 clauses = gimple_omp_target_clauses (oc->stmt);
316 vector_length = find_omp_clause (clauses, OMP_CLAUSE_VECTOR_LENGTH);
317 if (vector_length)
318 vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (vector_length),
319 sizetype,
320 OMP_CLAUSE_VECTOR_LENGTH_EXPR
321 (vector_length));
322 else
323 vector_length = fold_convert (sizetype, integer_one_node);
325 gangs = find_omp_clause (clauses, OMP_CLAUSE_NUM_GANGS);
326 if (gangs)
327 gangs = fold_convert_loc (OMP_CLAUSE_LOCATION (gangs), sizetype,
328 OMP_CLAUSE_NUM_GANGS_EXPR (gangs));
329 else
330 gangs = fold_convert (sizetype, integer_one_node);
332 break;
335 nthreads = fold_build2 (MULT_EXPR, sizetype, gangs, vector_length);
337 return nthreads;
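/* As a worked example, for

       #pragma acc parallel num_gangs (32) vector_length (128)

   the function above returns 32 * 128 == 4096, independent of the
   number of threads the device will actually launch. */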
340 /* Holds offload tables with decls. */
341 vec<tree, va_gc> *offload_funcs, *offload_vars;
343 /* Convenience function for calling scan_omp_1_op on tree operands. */
345 static inline tree
346 scan_omp_op (tree *tp, omp_context *ctx)
348 struct walk_stmt_info wi;
350 memset (&wi, 0, sizeof (wi));
351 wi.info = ctx;
352 wi.want_locations = true;
354 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
357 static void lower_omp (gimple_seq *, omp_context *);
358 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
359 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
361 /* Find an OMP clause of type KIND within CLAUSES. */
363 tree
364 find_omp_clause (tree clauses, enum omp_clause_code kind)
366 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
367 if (OMP_CLAUSE_CODE (clauses) == kind)
368 return clauses;
370 return NULL_TREE;
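/* Typical use, e.g. when inspecting a loop's schedule clause (this
   mirrors the use in determine_parallel_type below):

       tree c = find_omp_clause (gimple_omp_for_clauses (ws_stmt),
                                 OMP_CLAUSE_SCHEDULE);
       if (c)
         ... OMP_CLAUSE_SCHEDULE_KIND (c) ...
*/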
373 /* Return true if CTX is for an omp parallel. */
375 static inline bool
376 is_parallel_ctx (omp_context *ctx)
378 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
382 /* Return true if CTX is for an omp task. */
384 static inline bool
385 is_task_ctx (omp_context *ctx)
387 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
391 /* Return true if CTX is for an omp parallel or omp task. */
393 static inline bool
394 is_taskreg_ctx (omp_context *ctx)
396 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
397 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
401 /* Return true if REGION is a combined parallel+workshare region. */
403 static inline bool
404 is_combined_parallel (struct omp_region *region)
406 return region->is_combined_parallel;
410 /* Extract the header elements of parallel loop FOR_STMT and store
411 them into *FD. */
413 static void
414 extract_omp_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
415 struct omp_for_data_loop *loops)
417 tree t, var, *collapse_iter, *collapse_count;
418 tree count = NULL_TREE, iter_type = long_integer_type_node;
419 struct omp_for_data_loop *loop;
420 int i;
421 struct omp_for_data_loop dummy_loop;
422 location_t loc = gimple_location (for_stmt);
423 bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
424 bool distribute = gimple_omp_for_kind (for_stmt)
425 == GF_OMP_FOR_KIND_DISTRIBUTE;
427 fd->for_stmt = for_stmt;
428 fd->pre = NULL;
429 fd->collapse = gimple_omp_for_collapse (for_stmt);
430 if (fd->collapse > 1)
431 fd->loops = loops;
432 else
433 fd->loops = &fd->loop;
435 fd->have_nowait = distribute || simd;
436 fd->have_ordered = false;
437 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
438 fd->chunk_size = NULL_TREE;
439 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
440 fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
441 collapse_iter = NULL;
442 collapse_count = NULL;
444 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
445 switch (OMP_CLAUSE_CODE (t))
447 case OMP_CLAUSE_NOWAIT:
448 fd->have_nowait = true;
449 break;
450 case OMP_CLAUSE_ORDERED:
451 fd->have_ordered = true;
452 break;
453 case OMP_CLAUSE_SCHEDULE:
454 gcc_assert (!distribute);
455 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
456 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
457 break;
458 case OMP_CLAUSE_DIST_SCHEDULE:
459 gcc_assert (distribute);
460 fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
461 break;
462 case OMP_CLAUSE_COLLAPSE:
463 if (fd->collapse > 1)
465 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
466 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
468 break;
469 default:
470 break;
473 /* FIXME: for now map schedule(auto) to schedule(static).
474 There should be analysis to determine whether all iterations
475 are approximately the same amount of work (then schedule(static)
476 is best) or if it varies (then schedule(dynamic,N) is better). */
477 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
479 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
480 gcc_assert (fd->chunk_size == NULL);
482 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
483 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
484 gcc_assert (fd->chunk_size == NULL);
485 else if (fd->chunk_size == NULL)
487 /* We only need to compute a default chunk size for ordered
488 static loops and dynamic loops. */
489 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
490 || fd->have_ordered)
491 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
492 ? integer_zero_node : integer_one_node;
495 for (i = 0; i < fd->collapse; i++)
497 if (fd->collapse == 1)
498 loop = &fd->loop;
499 else if (loops != NULL)
500 loop = loops + i;
501 else
502 loop = &dummy_loop;
504 loop->v = gimple_omp_for_index (for_stmt, i);
505 gcc_assert (SSA_VAR_P (loop->v));
506 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
507 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
508 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
509 loop->n1 = gimple_omp_for_initial (for_stmt, i);
511 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
512 loop->n2 = gimple_omp_for_final (for_stmt, i);
513 switch (loop->cond_code)
515 case LT_EXPR:
516 case GT_EXPR:
517 break;
518 case NE_EXPR:
519 gcc_assert (gimple_omp_for_kind (for_stmt)
520 == GF_OMP_FOR_KIND_CILKSIMD
521 || (gimple_omp_for_kind (for_stmt)
522 == GF_OMP_FOR_KIND_CILKFOR));
523 break;
524 case LE_EXPR:
525 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
526 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
527 else
528 loop->n2 = fold_build2_loc (loc,
529 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
530 build_int_cst (TREE_TYPE (loop->n2), 1));
531 loop->cond_code = LT_EXPR;
532 break;
533 case GE_EXPR:
534 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
535 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
536 else
537 loop->n2 = fold_build2_loc (loc,
538 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
539 build_int_cst (TREE_TYPE (loop->n2), 1));
540 loop->cond_code = GT_EXPR;
541 break;
542 default:
543 gcc_unreachable ();
546 t = gimple_omp_for_incr (for_stmt, i);
547 gcc_assert (TREE_OPERAND (t, 0) == var);
548 switch (TREE_CODE (t))
550 case PLUS_EXPR:
551 loop->step = TREE_OPERAND (t, 1);
552 break;
553 case POINTER_PLUS_EXPR:
554 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
555 break;
556 case MINUS_EXPR:
557 loop->step = TREE_OPERAND (t, 1);
558 loop->step = fold_build1_loc (loc,
559 NEGATE_EXPR, TREE_TYPE (loop->step),
560 loop->step);
561 break;
562 default:
563 gcc_unreachable ();
566 if (simd
567 || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
568 && !fd->have_ordered))
570 if (fd->collapse == 1)
571 iter_type = TREE_TYPE (loop->v);
572 else if (i == 0
573 || TYPE_PRECISION (iter_type)
574 < TYPE_PRECISION (TREE_TYPE (loop->v)))
575 iter_type
576 = build_nonstandard_integer_type
577 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
579 else if (iter_type != long_long_unsigned_type_node)
581 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
582 iter_type = long_long_unsigned_type_node;
583 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
584 && TYPE_PRECISION (TREE_TYPE (loop->v))
585 >= TYPE_PRECISION (iter_type))
587 tree n;
589 if (loop->cond_code == LT_EXPR)
590 n = fold_build2_loc (loc,
591 PLUS_EXPR, TREE_TYPE (loop->v),
592 loop->n2, loop->step);
593 else
594 n = loop->n1;
595 if (TREE_CODE (n) != INTEGER_CST
596 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
597 iter_type = long_long_unsigned_type_node;
599 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
600 > TYPE_PRECISION (iter_type))
602 tree n1, n2;
604 if (loop->cond_code == LT_EXPR)
606 n1 = loop->n1;
607 n2 = fold_build2_loc (loc,
608 PLUS_EXPR, TREE_TYPE (loop->v),
609 loop->n2, loop->step);
611 else
613 n1 = fold_build2_loc (loc,
614 MINUS_EXPR, TREE_TYPE (loop->v),
615 loop->n2, loop->step);
616 n2 = loop->n1;
618 if (TREE_CODE (n1) != INTEGER_CST
619 || TREE_CODE (n2) != INTEGER_CST
620 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
621 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
622 iter_type = long_long_unsigned_type_node;
626 if (collapse_count && *collapse_count == NULL)
628 t = fold_binary (loop->cond_code, boolean_type_node,
629 fold_convert (TREE_TYPE (loop->v), loop->n1),
630 fold_convert (TREE_TYPE (loop->v), loop->n2));
631 if (t && integer_zerop (t))
632 count = build_zero_cst (long_long_unsigned_type_node);
633 else if ((i == 0 || count != NULL_TREE)
634 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
635 && TREE_CONSTANT (loop->n1)
636 && TREE_CONSTANT (loop->n2)
637 && TREE_CODE (loop->step) == INTEGER_CST)
639 tree itype = TREE_TYPE (loop->v);
641 if (POINTER_TYPE_P (itype))
642 itype = signed_type_for (itype);
643 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
644 t = fold_build2_loc (loc,
645 PLUS_EXPR, itype,
646 fold_convert_loc (loc, itype, loop->step), t);
647 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
648 fold_convert_loc (loc, itype, loop->n2));
649 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
650 fold_convert_loc (loc, itype, loop->n1));
651 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
652 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
653 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
654 fold_build1_loc (loc, NEGATE_EXPR, itype,
655 fold_convert_loc (loc, itype,
656 loop->step)));
657 else
658 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
659 fold_convert_loc (loc, itype, loop->step));
660 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
661 if (count != NULL_TREE)
662 count = fold_build2_loc (loc,
663 MULT_EXPR, long_long_unsigned_type_node,
664 count, t);
665 else
666 count = t;
667 if (TREE_CODE (count) != INTEGER_CST)
668 count = NULL_TREE;
670 else if (count && !integer_zerop (count))
671 count = NULL_TREE;
675 if (count
676 && !simd
677 && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
678 || fd->have_ordered))
680 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
681 iter_type = long_long_unsigned_type_node;
682 else
683 iter_type = long_integer_type_node;
685 else if (collapse_iter && *collapse_iter != NULL)
686 iter_type = TREE_TYPE (*collapse_iter);
687 fd->iter_type = iter_type;
688 if (collapse_iter && *collapse_iter == NULL)
689 *collapse_iter = create_tmp_var (iter_type, ".iter");
690 if (collapse_count && *collapse_count == NULL)
692 if (count)
693 *collapse_count = fold_convert_loc (loc, iter_type, count);
694 else
695 *collapse_count = create_tmp_var (iter_type, ".count");
698 if (fd->collapse > 1)
700 fd->loop.v = *collapse_iter;
701 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
702 fd->loop.n2 = *collapse_count;
703 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
704 fd->loop.cond_code = LT_EXPR;
707 /* For OpenACC loops, force a chunk size of one, as this avoids the default
708 scheduling where several subsequent iterations are being executed by the
709 same thread. */
710 if (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
712 gcc_assert (fd->chunk_size == NULL_TREE);
713 fd->chunk_size = build_int_cst (TREE_TYPE (fd->loop.v), 1);
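/* As a worked example of the iteration count computed above for a
   collapsed loop nest: for

       for (i = 0; i < 10; i += 4)

   with cond_code LT_EXPR, the count is

       (step - 1 + n2 - n1) / step = (4 - 1 + 10 - 0) / 4 = 3

   using truncating division, matching the three iterations
   i = 0, 4, 8. */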
718 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
719 is the immediate dominator of PAR_ENTRY_BB, return true if there
720 are no data dependencies that would prevent expanding the parallel
721 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
723 When expanding a combined parallel+workshare region, the call to
724 the child function may need additional arguments in the case of
725 GIMPLE_OMP_FOR regions. In some cases, these arguments are
726 computed out of variables passed in from the parent to the child
727 via 'struct .omp_data_s'. For instance:
729 #pragma omp parallel for schedule (guided, i * 4)
730 for (j ...)
732 Is lowered into:
734 # BLOCK 2 (PAR_ENTRY_BB)
735 .omp_data_o.i = i;
736 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
738 # BLOCK 3 (WS_ENTRY_BB)
739 .omp_data_i = &.omp_data_o;
740 D.1667 = .omp_data_i->i;
741 D.1598 = D.1667 * 4;
742 #pragma omp for schedule (guided, D.1598)
744 When we outline the parallel region, the call to the child function
745 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
746 that value is computed *after* the call site. So, in principle we
747 cannot do the transformation.
749 To see whether the code in WS_ENTRY_BB blocks the combined
750 parallel+workshare call, we collect all the variables used in the
751 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
752 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
753 call.
755 FIXME. If we had the SSA form built at this point, we could merely
756 hoist the code in block 3 into block 2 and be done with it. But at
757 this point we don't have dataflow information and though we could
758 hack something up here, it is really not worth the aggravation. */
760 static bool
761 workshare_safe_to_combine_p (basic_block ws_entry_bb)
763 struct omp_for_data fd;
764 gimple ws_stmt = last_stmt (ws_entry_bb);
766 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
767 return true;
769 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
771 extract_omp_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);
773 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
774 return false;
775 if (fd.iter_type != long_integer_type_node)
776 return false;
778 /* FIXME. We give up too easily here. If any of these arguments
779 are not constants, they will likely involve variables that have
780 been mapped into fields of .omp_data_s for sharing with the child
781 function. With appropriate data flow, it would be possible to
782 see through this. */
783 if (!is_gimple_min_invariant (fd.loop.n1)
784 || !is_gimple_min_invariant (fd.loop.n2)
785 || !is_gimple_min_invariant (fd.loop.step)
786 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
787 return false;
789 return true;
793 /* Collect additional arguments needed to emit a combined
794 parallel+workshare call. WS_STMT is the workshare directive being
795 expanded. */
797 static vec<tree, va_gc> *
798 get_ws_args_for (gimple par_stmt, gimple ws_stmt)
800 tree t;
801 location_t loc = gimple_location (ws_stmt);
802 vec<tree, va_gc> *ws_args;
804 if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
806 struct omp_for_data fd;
807 tree n1, n2;
809 extract_omp_for_data (for_stmt, &fd, NULL);
810 n1 = fd.loop.n1;
811 n2 = fd.loop.n2;
813 if (gimple_omp_for_combined_into_p (for_stmt))
815 tree innerc
816 = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
817 OMP_CLAUSE__LOOPTEMP_);
818 gcc_assert (innerc);
819 n1 = OMP_CLAUSE_DECL (innerc);
820 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
821 OMP_CLAUSE__LOOPTEMP_);
822 gcc_assert (innerc);
823 n2 = OMP_CLAUSE_DECL (innerc);
826 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
828 t = fold_convert_loc (loc, long_integer_type_node, n1);
829 ws_args->quick_push (t);
831 t = fold_convert_loc (loc, long_integer_type_node, n2);
832 ws_args->quick_push (t);
834 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
835 ws_args->quick_push (t);
837 if (fd.chunk_size)
839 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
840 ws_args->quick_push (t);
843 return ws_args;
845 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
847 /* Number of sections is equal to the number of edges from the
848 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
849 the exit of the sections region. */
850 basic_block bb = single_succ (gimple_bb (ws_stmt));
851 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
852 vec_alloc (ws_args, 1);
853 ws_args->quick_push (t);
854 return ws_args;
857 gcc_unreachable ();
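/* For illustration, for a combined region such as

       #pragma omp parallel for schedule (dynamic, 8)
       for (i = 0; i < n; i++)
         ...

   the vector built above holds the loop bounds, step and chunk size
   { 0, n, 1, 8 } (converted to long), which the expansion phase
   appends to the arguments of the combined libgomp call, e.g.
   GOMP_parallel_loop_dynamic; the exact entry point depends on the
   schedule kind. */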
861 /* Discover whether REGION is a combined parallel+workshare region. */
863 static void
864 determine_parallel_type (struct omp_region *region)
866 basic_block par_entry_bb, par_exit_bb;
867 basic_block ws_entry_bb, ws_exit_bb;
869 if (region == NULL || region->inner == NULL
870 || region->exit == NULL || region->inner->exit == NULL
871 || region->inner->cont == NULL)
872 return;
874 /* We only support parallel+for and parallel+sections. */
875 if (region->type != GIMPLE_OMP_PARALLEL
876 || (region->inner->type != GIMPLE_OMP_FOR
877 && region->inner->type != GIMPLE_OMP_SECTIONS))
878 return;
880 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
881 WS_EXIT_BB -> PAR_EXIT_BB. */
882 par_entry_bb = region->entry;
883 par_exit_bb = region->exit;
884 ws_entry_bb = region->inner->entry;
885 ws_exit_bb = region->inner->exit;
887 if (single_succ (par_entry_bb) == ws_entry_bb
888 && single_succ (ws_exit_bb) == par_exit_bb
889 && workshare_safe_to_combine_p (ws_entry_bb)
890 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
891 || (last_and_only_stmt (ws_entry_bb)
892 && last_and_only_stmt (par_exit_bb))))
894 gimple par_stmt = last_stmt (par_entry_bb);
895 gimple ws_stmt = last_stmt (ws_entry_bb);
897 if (region->inner->type == GIMPLE_OMP_FOR)
899 /* If this is a combined parallel loop, we need to determine
900 whether or not to use the combined library calls. There
901 are two cases where we do not apply the transformation:
902 static loops and any kind of ordered loop. In the first
903 case, we already open code the loop so there is no need
904 to do anything else. In the latter case, the combined
905 parallel loop call would still need extra synchronization
906 to implement ordered semantics, so there would not be any
907 gain in using the combined call. */
908 tree clauses = gimple_omp_for_clauses (ws_stmt);
909 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
910 if (c == NULL
911 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
912 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
914 region->is_combined_parallel = false;
915 region->inner->is_combined_parallel = false;
916 return;
920 region->is_combined_parallel = true;
921 region->inner->is_combined_parallel = true;
922 region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
927 /* Return true if EXPR is variable sized. */
929 static inline bool
930 is_variable_sized (const_tree expr)
932 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
935 /* Return true if DECL is a reference type. */
937 static inline bool
938 is_reference (tree decl)
940 return lang_hooks.decls.omp_privatize_by_reference (decl);
943 /* Return the type of a decl. If the decl is reference type,
944 return its base type. */
945 static inline tree
946 get_base_type (tree decl)
948 tree type = TREE_TYPE (decl);
949 if (is_reference (decl))
950 type = TREE_TYPE (type);
951 return type;
954 /* Lookup variables. The "maybe" form
955 allows the variable not to have been entered; otherwise we
956 assert that the variable must have been entered. */
958 static inline tree
959 lookup_decl (tree var, omp_context *ctx)
961 tree *n = ctx->cb.decl_map->get (var);
962 return *n;
965 static inline tree
966 maybe_lookup_decl (const_tree var, omp_context *ctx)
968 tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
969 return n ? *n : NULL_TREE;
972 static inline tree
973 lookup_field (tree var, omp_context *ctx)
975 splay_tree_node n;
976 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
977 return (tree) n->value;
980 static inline tree
981 lookup_sfield (tree var, omp_context *ctx)
983 splay_tree_node n;
984 n = splay_tree_lookup (ctx->sfield_map
985 ? ctx->sfield_map : ctx->field_map,
986 (splay_tree_key) var);
987 return (tree) n->value;
990 static inline tree
991 maybe_lookup_field (tree var, omp_context *ctx)
993 splay_tree_node n;
994 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
995 return n ? (tree) n->value : NULL_TREE;
998 static inline tree
999 lookup_oacc_reduction (const char *id, omp_context *ctx)
1001 splay_tree_node n;
1002 n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) id);
1003 return (tree) n->value;
1006 static inline tree
1007 maybe_lookup_oacc_reduction (tree var, omp_context *ctx)
1009 splay_tree_node n = NULL;
1010 if (ctx->reduction_map)
1011 n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) var);
1012 return n ? (tree) n->value : NULL_TREE;
1015 /* Return true if DECL should be copied by pointer. SHARED_CTX is
1016 the parallel context if DECL is to be shared. */
1018 static bool
1019 use_pointer_for_field (tree decl, omp_context *shared_ctx)
1021 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
1022 return true;
1024 /* We can only use copy-in/copy-out semantics for shared variables
1025 when we know the value is not accessible from an outer scope. */
1026 if (shared_ctx)
1028 gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));
1030 /* ??? Trivially accessible from anywhere. But why would we even
1031 be passing an address in this case? Should we simply assert
1032 this to be false, or should we have a cleanup pass that removes
1033 these from the list of mappings? */
1034 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
1035 return true;
1037 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
1038 without analyzing the expression whether or not its location
1039 is accessible to anyone else. In the case of nested parallel
1040 regions it certainly may be. */
1041 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
1042 return true;
1044 /* Do not use copy-in/copy-out for variables that have their
1045 address taken. */
1046 if (TREE_ADDRESSABLE (decl))
1047 return true;
1049 /* lower_send_shared_vars only uses copy-in, but not copy-out
1050 for these. */
1051 if (TREE_READONLY (decl)
1052 || ((TREE_CODE (decl) == RESULT_DECL
1053 || TREE_CODE (decl) == PARM_DECL)
1054 && DECL_BY_REFERENCE (decl)))
1055 return false;
1057 /* Disallow copy-in/out in nested parallel if
1058 decl is shared in outer parallel, otherwise
1059 each thread could store the shared variable
1060 in its own copy-in location, making the
1061 variable no longer really shared. */
1062 if (shared_ctx->is_nested)
1064 omp_context *up;
1066 for (up = shared_ctx->outer; up; up = up->outer)
1067 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
1068 break;
1070 if (up)
1072 tree c;
1074 for (c = gimple_omp_taskreg_clauses (up->stmt);
1075 c; c = OMP_CLAUSE_CHAIN (c))
1076 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
1077 && OMP_CLAUSE_DECL (c) == decl)
1078 break;
1080 if (c)
1081 goto maybe_mark_addressable_and_ret;
1085 /* For tasks avoid using copy-in/out. As tasks can be
1086 deferred or executed in a different thread, the task need
1087 not have terminated when GOMP_task returns. */
1088 if (is_task_ctx (shared_ctx))
1090 tree outer;
1091 maybe_mark_addressable_and_ret:
1092 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
1093 if (is_gimple_reg (outer))
1095 /* Taking address of OUTER in lower_send_shared_vars
1096 might need regimplification of everything that uses the
1097 variable. */
1098 if (!task_shared_vars)
1099 task_shared_vars = BITMAP_ALLOC (NULL);
1100 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
1101 TREE_ADDRESSABLE (outer) = 1;
1103 return true;
1107 return false;
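/* For example, a shared int scalar whose address is never taken is
   passed by copy-in/copy-out (this function returns false); but if
   the program does

       int x;
       int *p = &x;
       #pragma omp parallel shared (x)
         ...

   then X is TREE_ADDRESSABLE and must be passed by pointer (returns
   true), since another thread could reach it through P. */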
1110 /* Construct a new automatic decl similar to VAR. */
1112 static tree
1113 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
1115 tree copy = copy_var_decl (var, name, type);
1117 DECL_CONTEXT (copy) = current_function_decl;
1118 DECL_CHAIN (copy) = ctx->block_vars;
1119 ctx->block_vars = copy;
1121 return copy;
1124 static tree
1125 omp_copy_decl_1 (tree var, omp_context *ctx)
1127 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
1130 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
1131 as appropriate. */
1132 static tree
1133 omp_build_component_ref (tree obj, tree field)
1135 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
1136 if (TREE_THIS_VOLATILE (field))
1137 TREE_THIS_VOLATILE (ret) |= 1;
1138 if (TREE_READONLY (field))
1139 TREE_READONLY (ret) |= 1;
1140 return ret;
1143 /* Build tree nodes to access the field for VAR on the receiver side. */
1145 static tree
1146 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
1148 tree x, field = lookup_field (var, ctx);
1150 /* If the receiver record type was remapped in the child function,
1151 remap the field into the new record type. */
1152 x = maybe_lookup_field (field, ctx);
1153 if (x != NULL)
1154 field = x;
1156 x = build_simple_mem_ref (ctx->receiver_decl);
1157 x = omp_build_component_ref (x, field);
1158 if (by_ref)
1159 x = build_simple_mem_ref (x);
1161 return x;
1164 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
1165 of a parallel, this is a component reference; for workshare constructs
1166 this is some variable. */
1168 static tree
1169 build_outer_var_ref (tree var, omp_context *ctx)
1171 tree x;
1173 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
1174 x = var;
1175 else if (is_variable_sized (var))
1177 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
1178 x = build_outer_var_ref (x, ctx);
1179 x = build_simple_mem_ref (x);
1181 else if (is_taskreg_ctx (ctx))
1183 bool by_ref = use_pointer_for_field (var, NULL);
1184 x = build_receiver_ref (var, by_ref, ctx);
1186 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
1187 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
1189 /* #pragma omp simd isn't a worksharing construct, and can reference even
1190 private vars in its linear etc. clauses. */
1191 x = NULL_TREE;
1192 if (ctx->outer && is_taskreg_ctx (ctx))
1193 x = lookup_decl (var, ctx->outer);
1194 else if (ctx->outer)
1195 x = maybe_lookup_decl_in_outer_ctx (var, ctx);
1196 if (x == NULL_TREE)
1197 x = var;
1199 else if (ctx->outer)
1200 x = lookup_decl (var, ctx->outer);
1201 else if (is_reference (var))
1202 /* This can happen with orphaned constructs. If var is reference, it is
1203 possible it is shared and as such valid. */
1204 x = var;
1205 else
1206 gcc_unreachable ();
1208 if (is_reference (var))
1209 x = build_simple_mem_ref (x);
1211 return x;
1214 /* Build tree nodes to access the field for VAR on the sender side. */
1216 static tree
1217 build_sender_ref (tree var, omp_context *ctx)
1219 tree field = lookup_sfield (var, ctx);
1220 return omp_build_component_ref (ctx->sender_decl, field);
1223 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
1225 static void
1226 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
1228 tree field, type, sfield = NULL_TREE;
1230 gcc_assert ((mask & 1) == 0
1231 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
1232 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
1233 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
1234 gcc_assert ((mask & 3) == 3
1235 || !is_gimple_omp_oacc (ctx->stmt));
1237 type = TREE_TYPE (var);
1238 if (mask & 4)
1240 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1241 type = build_pointer_type (build_pointer_type (type));
1243 else if (by_ref)
1244 type = build_pointer_type (type);
1245 else if ((mask & 3) == 1 && is_reference (var))
1246 type = TREE_TYPE (type);
1248 field = build_decl (DECL_SOURCE_LOCATION (var),
1249 FIELD_DECL, DECL_NAME (var), type);
1251 /* Remember what variable this field was created for. This does have a
1252 side effect of making dwarf2out ignore this member, so for helpful
1253 debugging we clear it later in delete_omp_context. */
1254 DECL_ABSTRACT_ORIGIN (field) = var;
1255 if (type == TREE_TYPE (var))
1257 DECL_ALIGN (field) = DECL_ALIGN (var);
1258 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
1259 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
1261 else
1262 DECL_ALIGN (field) = TYPE_ALIGN (type);
1264 if ((mask & 3) == 3)
1266 insert_field_into_struct (ctx->record_type, field);
1267 if (ctx->srecord_type)
1269 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1270 FIELD_DECL, DECL_NAME (var), type);
1271 DECL_ABSTRACT_ORIGIN (sfield) = var;
1272 DECL_ALIGN (sfield) = DECL_ALIGN (field);
1273 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
1274 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
1275 insert_field_into_struct (ctx->srecord_type, sfield);
1278 else
1280 if (ctx->srecord_type == NULL_TREE)
1282 tree t;
1284 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1285 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1286 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1288 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1289 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1290 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1291 insert_field_into_struct (ctx->srecord_type, sfield);
1292 splay_tree_insert (ctx->sfield_map,
1293 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1294 (splay_tree_value) sfield);
1297 sfield = field;
1298 insert_field_into_struct ((mask & 1) ? ctx->record_type
1299 : ctx->srecord_type, field);
1302 if (mask & 1)
1303 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1304 (splay_tree_value) field);
1305 if ((mask & 2) && ctx->sfield_map)
1306 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1307 (splay_tree_value) sfield);
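/* To summarize the MASK bits handled above (stated here for
   convenience, derived from the code): mask & 1 installs a field in
   ctx->record_type, mask & 2 installs a shadow field in
   ctx->srecord_type for task constructs, and mask & 4 wraps an array
   type in a double pointer, as used for zero-bias array sections.
   E.g. install_var_field (decl, true, 7, ctx), as called from
   scan_sharing_clauses, creates both fields with the double-pointer
   type. */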
1310 static tree
1311 install_var_local (tree var, omp_context *ctx)
1313 tree new_var = omp_copy_decl_1 (var, ctx);
1314 insert_decl_map (&ctx->cb, var, new_var);
1315 return new_var;
1318 /* Adjust the replacement for DECL in CTX for the new context. This means
1319 copying the DECL_VALUE_EXPR, and fixing up the type. */
1321 static void
1322 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1324 tree new_decl, size;
1326 new_decl = lookup_decl (decl, ctx);
1328 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1330 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1331 && DECL_HAS_VALUE_EXPR_P (decl))
1333 tree ve = DECL_VALUE_EXPR (decl);
1334 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1335 SET_DECL_VALUE_EXPR (new_decl, ve);
1336 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1339 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1341 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1342 if (size == error_mark_node)
1343 size = TYPE_SIZE (TREE_TYPE (new_decl));
1344 DECL_SIZE (new_decl) = size;
1346 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1347 if (size == error_mark_node)
1348 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1349 DECL_SIZE_UNIT (new_decl) = size;
1353 /* The callback for remap_decl. Search all containing contexts for a
1354 mapping of the variable; this avoids having to duplicate the splay
1355 tree ahead of time. We know a mapping doesn't already exist in the
1356 given context. Create new mappings to implement default semantics. */
1358 static tree
1359 omp_copy_decl (tree var, copy_body_data *cb)
1361 omp_context *ctx = (omp_context *) cb;
1362 tree new_var;
1364 if (TREE_CODE (var) == LABEL_DECL)
1366 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1367 DECL_CONTEXT (new_var) = current_function_decl;
1368 insert_decl_map (&ctx->cb, var, new_var);
1369 return new_var;
1372 while (!is_taskreg_ctx (ctx))
1374 ctx = ctx->outer;
1375 if (ctx == NULL)
1376 return var;
1377 new_var = maybe_lookup_decl (var, ctx);
1378 if (new_var)
1379 return new_var;
1382 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1383 return var;
1385 return error_mark_node;
1389 /* Debugging dumps for parallel regions. */
1390 void dump_omp_region (FILE *, struct omp_region *, int);
1391 void debug_omp_region (struct omp_region *);
1392 void debug_all_omp_regions (void);
1394 /* Dump the parallel region tree rooted at REGION. */
1396 void
1397 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1399 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1400 gimple_code_name[region->type]);
1402 if (region->inner)
1403 dump_omp_region (file, region->inner, indent + 4);
1405 if (region->cont)
1407 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1408 region->cont->index);
1411 if (region->exit)
1412 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1413 region->exit->index);
1414 else
1415 fprintf (file, "%*s[no exit marker]\n", indent, "");
1417 if (region->next)
1418 dump_omp_region (file, region->next, indent);
1421 DEBUG_FUNCTION void
1422 debug_omp_region (struct omp_region *region)
1424 dump_omp_region (stderr, region, 0);
1427 DEBUG_FUNCTION void
1428 debug_all_omp_regions (void)
1430 dump_omp_region (stderr, root_omp_region, 0);
1434 /* Create a new parallel region whose entry block is BB, inside region PARENT. */
1436 static struct omp_region *
1437 new_omp_region (basic_block bb, enum gimple_code type,
1438 struct omp_region *parent)
1440 struct omp_region *region = XCNEW (struct omp_region);
1442 region->outer = parent;
1443 region->entry = bb;
1444 region->type = type;
1446 if (parent)
1448 /* This is a nested region. Add it to the list of inner
1449 regions in PARENT. */
1450 region->next = parent->inner;
1451 parent->inner = region;
1453 else
1455 /* This is a toplevel region. Add it to the list of toplevel
1456 regions in ROOT_OMP_REGION. */
1457 region->next = root_omp_region;
1458 root_omp_region = region;
1461 return region;
1464 /* Release the memory associated with the region tree rooted at REGION. */
1466 static void
1467 free_omp_region_1 (struct omp_region *region)
1469 struct omp_region *i, *n;
1471 for (i = region->inner; i ; i = n)
1473 n = i->next;
1474 free_omp_region_1 (i);
1477 free (region);
1480 /* Release the memory for the entire omp region tree. */
1482 void
1483 free_omp_regions (void)
1485 struct omp_region *r, *n;
1486 for (r = root_omp_region; r ; r = n)
1488 n = r->next;
1489 free_omp_region_1 (r);
1491 root_omp_region = NULL;
1495 /* Create a new context, with OUTER_CTX being the surrounding context. */
1497 static omp_context *
1498 new_omp_context (gimple stmt, omp_context *outer_ctx)
1500 omp_context *ctx = XCNEW (omp_context);
1502 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1503 (splay_tree_value) ctx);
1504 ctx->stmt = stmt;
1506 if (outer_ctx)
1508 ctx->outer = outer_ctx;
1509 ctx->cb = outer_ctx->cb;
1510 ctx->cb.block = NULL;
1511 ctx->depth = outer_ctx->depth + 1;
1512 ctx->reduction_map = outer_ctx->reduction_map;
1514 else
1516 ctx->cb.src_fn = current_function_decl;
1517 ctx->cb.dst_fn = current_function_decl;
1518 ctx->cb.src_node = cgraph_node::get (current_function_decl);
1519 gcc_checking_assert (ctx->cb.src_node);
1520 ctx->cb.dst_node = ctx->cb.src_node;
1521 ctx->cb.src_cfun = cfun;
1522 ctx->cb.copy_decl = omp_copy_decl;
1523 ctx->cb.eh_lp_nr = 0;
1524 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1525 ctx->depth = 1;
1528 ctx->cb.decl_map = new hash_map<tree, tree>;
1530 return ctx;
1533 static gimple_seq maybe_catch_exception (gimple_seq);
1535 /* Finalize task copyfn. */
1537 static void
1538 finalize_task_copyfn (gomp_task *task_stmt)
1540 struct function *child_cfun;
1541 tree child_fn;
1542 gimple_seq seq = NULL, new_seq;
1543 gbind *bind;
1545 child_fn = gimple_omp_task_copy_fn (task_stmt);
1546 if (child_fn == NULL_TREE)
1547 return;
1549 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1550 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
1552 push_cfun (child_cfun);
1553 bind = gimplify_body (child_fn, false);
1554 gimple_seq_add_stmt (&seq, bind);
1555 new_seq = maybe_catch_exception (seq);
1556 if (new_seq != seq)
1558 bind = gimple_build_bind (NULL, new_seq, NULL);
1559 seq = NULL;
1560 gimple_seq_add_stmt (&seq, bind);
1562 gimple_set_body (child_fn, seq);
1563 pop_cfun ();
1565 /* Inform the callgraph about the new function. */
1566 cgraph_node::add_new_function (child_fn, false);
1567 cgraph_node::get (child_fn)->parallelized_function = 1;
1570 /* Destroy an omp_context data structure. Called through the splay tree
1571 value delete callback. */
1573 static void
1574 delete_omp_context (splay_tree_value value)
1576 omp_context *ctx = (omp_context *) value;
1578 delete ctx->cb.decl_map;
1580 if (ctx->field_map)
1581 splay_tree_delete (ctx->field_map);
1582 if (ctx->sfield_map)
1583 splay_tree_delete (ctx->sfield_map);
1584 /* Reduction map is copied to nested contexts, so only delete it in the
1585 owner. */
1586 if (ctx->reduction_map
1587 && gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
1588 && is_gimple_omp_offloaded (ctx->stmt)
1589 && is_gimple_omp_oacc (ctx->stmt))
1590 splay_tree_delete (ctx->reduction_map);
1592 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1593 it produces corrupt debug information. */
1594 if (ctx->record_type)
1596 tree t;
1597 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1598 DECL_ABSTRACT_ORIGIN (t) = NULL;
1600 if (ctx->srecord_type)
1602 tree t;
1603 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1604 DECL_ABSTRACT_ORIGIN (t) = NULL;
1607 if (is_task_ctx (ctx))
1608 finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));
1610 XDELETE (ctx);
1613 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1614 context. */
1616 static void
1617 fixup_child_record_type (omp_context *ctx)
1619 tree f, type = ctx->record_type;
1621 /* ??? It isn't sufficient to just call remap_type here, because
1622 variably_modified_type_p doesn't work the way we expect for
1623 record types. Testing each field for whether it needs remapping
1624 and creating a new record by hand works, however. */
1625 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1626 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1627 break;
1628 if (f)
1630 tree name, new_fields = NULL;
1632 type = lang_hooks.types.make_type (RECORD_TYPE);
1633 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1634 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1635 TYPE_DECL, name, type);
1636 TYPE_NAME (type) = name;
1638 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1640 tree new_f = copy_node (f);
1641 DECL_CONTEXT (new_f) = type;
1642 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1643 DECL_CHAIN (new_f) = new_fields;
1644 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1645 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1646 &ctx->cb, NULL);
1647 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1648 &ctx->cb, NULL);
1649 new_fields = new_f;
1651 /* Arrange to be able to look up the receiver field
1652 given the sender field. */
1653 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1654 (splay_tree_value) new_f);
1656 TYPE_FIELDS (type) = nreverse (new_fields);
1657 layout_type (type);
1660 TREE_TYPE (ctx->receiver_decl)
1661 = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
1664 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1665 specified by CLAUSES. */
1667 static void
1668 scan_sharing_clauses (tree clauses, omp_context *ctx)
1670 tree c, decl;
1671 bool scan_array_reductions = false;
1673 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1675 bool by_ref;
1677 switch (OMP_CLAUSE_CODE (c))
1679 case OMP_CLAUSE_PRIVATE:
1680 decl = OMP_CLAUSE_DECL (c);
1681 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1682 goto do_private;
1683 else if (!is_variable_sized (decl))
1684 install_var_local (decl, ctx);
1685 break;
1687 case OMP_CLAUSE_SHARED:
1688 decl = OMP_CLAUSE_DECL (c);
1689 /* Ignore shared directives in teams construct. */
1690 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1692 /* Global variables don't need to be copied,
1693 the receiver side will use them directly. */
1694 tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
1695 if (is_global_var (odecl))
1696 break;
1697 insert_decl_map (&ctx->cb, decl, odecl);
1698 break;
1700 gcc_assert (is_taskreg_ctx (ctx));
1701 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1702 || !is_variable_sized (decl));
1703 /* Global variables don't need to be copied,
1704 the receiver side will use them directly. */
1705 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1706 break;
1707 by_ref = use_pointer_for_field (decl, ctx);
1708 if (! TREE_READONLY (decl)
1709 || TREE_ADDRESSABLE (decl)
1710 || by_ref
1711 || is_reference (decl))
1713 install_var_field (decl, by_ref, 3, ctx);
1714 install_var_local (decl, ctx);
1715 break;
1717 /* We don't need to copy const scalar vars back. */
1718 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1719 goto do_private;
1721 case OMP_CLAUSE_LASTPRIVATE:
1722 /* Let the corresponding firstprivate clause create
1723 the variable. */
1724 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1725 break;
1726 /* FALLTHRU */
1728 case OMP_CLAUSE_FIRSTPRIVATE:
1729 if (is_gimple_omp_oacc (ctx->stmt))
1731 sorry ("clause not supported yet");
1732 break;
1734 /* FALLTHRU */
1735 case OMP_CLAUSE_REDUCTION:
1736 case OMP_CLAUSE_LINEAR:
1737 decl = OMP_CLAUSE_DECL (c);
1738 do_private:
1739 if (is_variable_sized (decl))
1741 if (is_task_ctx (ctx))
1742 install_var_field (decl, false, 1, ctx);
1743 break;
1745 else if (is_taskreg_ctx (ctx))
1747 bool global
1748 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1749 by_ref = use_pointer_for_field (decl, NULL);
1751 if (is_task_ctx (ctx)
1752 && (global || by_ref || is_reference (decl)))
1754 install_var_field (decl, false, 1, ctx);
1755 if (!global)
1756 install_var_field (decl, by_ref, 2, ctx);
1758 else if (!global)
1759 install_var_field (decl, by_ref, 3, ctx);
1761 install_var_local (decl, ctx);
1762 if (is_gimple_omp_oacc (ctx->stmt)
1763 && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1765 /* Create a decl for the reduction array. */
1766 tree var = OMP_CLAUSE_DECL (c);
1767 tree type = get_base_type (var);
1768 tree ptype = build_pointer_type (type);
1769 tree array = create_tmp_var (ptype,
1770 oacc_get_reduction_array_id (var));
1771 omp_context *c = (ctx->field_map ? ctx : ctx->outer);
1772 install_var_field (array, true, 3, c);
1773 install_var_local (array, c);
1775 /* Insert it into the current context. */
1776 splay_tree_insert (ctx->reduction_map, (splay_tree_key)
1777 oacc_get_reduction_array_id (var),
1778 (splay_tree_value) array);
1779 splay_tree_insert (ctx->reduction_map,
1780 (splay_tree_key) array,
1781 (splay_tree_value) array);
1783 break;
1785 case OMP_CLAUSE__LOOPTEMP_:
1786 gcc_assert (is_parallel_ctx (ctx));
1787 decl = OMP_CLAUSE_DECL (c);
1788 install_var_field (decl, false, 3, ctx);
1789 install_var_local (decl, ctx);
1790 break;
1792 case OMP_CLAUSE_COPYPRIVATE:
1793 case OMP_CLAUSE_COPYIN:
1794 decl = OMP_CLAUSE_DECL (c);
1795 by_ref = use_pointer_for_field (decl, NULL);
1796 install_var_field (decl, by_ref, 3, ctx);
1797 break;
1799 case OMP_CLAUSE_DEFAULT:
1800 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1801 break;
1803 case OMP_CLAUSE_FINAL:
1804 case OMP_CLAUSE_IF:
1805 case OMP_CLAUSE_NUM_THREADS:
1806 case OMP_CLAUSE_NUM_TEAMS:
1807 case OMP_CLAUSE_THREAD_LIMIT:
1808 case OMP_CLAUSE_DEVICE:
1809 case OMP_CLAUSE_SCHEDULE:
1810 case OMP_CLAUSE_DIST_SCHEDULE:
1811 case OMP_CLAUSE_DEPEND:
1812 case OMP_CLAUSE__CILK_FOR_COUNT_:
1813 case OMP_CLAUSE_NUM_GANGS:
1814 case OMP_CLAUSE_NUM_WORKERS:
1815 case OMP_CLAUSE_VECTOR_LENGTH:
1816 if (ctx->outer)
1817 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1818 break;
1820 case OMP_CLAUSE_TO:
1821 case OMP_CLAUSE_FROM:
1822 case OMP_CLAUSE_MAP:
1823 if (ctx->outer)
1824 scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
1825 decl = OMP_CLAUSE_DECL (c);
1826 /* Global variables with "omp declare target" attribute
1827 don't need to be copied, the receiver side will use them
1828 directly. */
1829 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1830 && DECL_P (decl)
1831 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1832 && varpool_node::get_create (decl)->offloadable)
1833 break;
1834 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1835 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
1837 /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
1838 not offloaded; there is nothing to map for those. */
1839 if (!is_gimple_omp_offloaded (ctx->stmt)
1840 && !POINTER_TYPE_P (TREE_TYPE (decl))
1841 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
1842 break;
1844 if (DECL_P (decl))
1846 if (DECL_SIZE (decl)
1847 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1849 tree decl2 = DECL_VALUE_EXPR (decl);
1850 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1851 decl2 = TREE_OPERAND (decl2, 0);
1852 gcc_assert (DECL_P (decl2));
1853 install_var_field (decl2, true, 3, ctx);
1854 install_var_local (decl2, ctx);
1855 install_var_local (decl, ctx);
1857 else
1859 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1860 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
1861 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
1862 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1863 install_var_field (decl, true, 7, ctx);
1864 else
1865 install_var_field (decl, true, 3, ctx);
1866 if (is_gimple_omp_offloaded (ctx->stmt))
1867 install_var_local (decl, ctx);
1870 else
1872 tree base = get_base_address (decl);
1873 tree nc = OMP_CLAUSE_CHAIN (c);
1874 if (DECL_P (base)
1875 && nc != NULL_TREE
1876 && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
1877 && OMP_CLAUSE_DECL (nc) == base
1878 && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
1879 && integer_zerop (OMP_CLAUSE_SIZE (nc)))
1881 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
1882 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
1884 else
1886 if (ctx->outer)
1888 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1889 decl = OMP_CLAUSE_DECL (c);
1891 gcc_assert (!splay_tree_lookup (ctx->field_map,
1892 (splay_tree_key) decl));
1893 tree field
1894 = build_decl (OMP_CLAUSE_LOCATION (c),
1895 FIELD_DECL, NULL_TREE, ptr_type_node);
1896 DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
1897 insert_field_into_struct (ctx->record_type, field);
1898 splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
1899 (splay_tree_value) field);
1902 break;
1904 case OMP_CLAUSE_NOWAIT:
1905 case OMP_CLAUSE_ORDERED:
1906 case OMP_CLAUSE_COLLAPSE:
1907 case OMP_CLAUSE_UNTIED:
1908 case OMP_CLAUSE_MERGEABLE:
1909 case OMP_CLAUSE_PROC_BIND:
1910 case OMP_CLAUSE_SAFELEN:
1911 case OMP_CLAUSE_ASYNC:
1912 case OMP_CLAUSE_WAIT:
1913 case OMP_CLAUSE_GANG:
1914 case OMP_CLAUSE_WORKER:
1915 case OMP_CLAUSE_VECTOR:
1916 break;
1918 case OMP_CLAUSE_ALIGNED:
1919 decl = OMP_CLAUSE_DECL (c);
1920 if (is_global_var (decl)
1921 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1922 install_var_local (decl, ctx);
1923 break;
1925 case OMP_CLAUSE_DEVICE_RESIDENT:
1926 case OMP_CLAUSE_USE_DEVICE:
1927 case OMP_CLAUSE__CACHE_:
1928 case OMP_CLAUSE_INDEPENDENT:
1929 case OMP_CLAUSE_AUTO:
1930 case OMP_CLAUSE_SEQ:
1931 sorry ("clause not supported yet");
1932 break;
1934 default:
1935 gcc_unreachable ();
1939 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1941 switch (OMP_CLAUSE_CODE (c))
1943 case OMP_CLAUSE_LASTPRIVATE:
1944 /* Let the corresponding firstprivate clause create
1945 the variable. */
1946 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1947 scan_array_reductions = true;
1948 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1949 break;
1950 /* FALLTHRU */
1952 case OMP_CLAUSE_FIRSTPRIVATE:
1953 if (is_gimple_omp_oacc (ctx->stmt))
1955 sorry ("clause not supported yet");
1956 break;
1958 /* FALLTHRU */
1959 case OMP_CLAUSE_PRIVATE:
1960 case OMP_CLAUSE_REDUCTION:
1961 case OMP_CLAUSE_LINEAR:
1962 decl = OMP_CLAUSE_DECL (c);
1963 if (is_variable_sized (decl))
1964 install_var_local (decl, ctx);
1965 fixup_remapped_decl (decl, ctx,
1966 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1967 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1968 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1969 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1970 scan_array_reductions = true;
1971 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1972 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
1973 scan_array_reductions = true;
1974 break;
1976 case OMP_CLAUSE_SHARED:
1977 /* Ignore shared directives in teams construct. */
1978 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1979 break;
1980 decl = OMP_CLAUSE_DECL (c);
1981 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1982 fixup_remapped_decl (decl, ctx, false);
1983 break;
1985 case OMP_CLAUSE_MAP:
1986 if (!is_gimple_omp_offloaded (ctx->stmt))
1987 break;
1988 decl = OMP_CLAUSE_DECL (c);
1989 if (DECL_P (decl)
1990 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1991 && varpool_node::get_create (decl)->offloadable)
1992 break;
1993 if (DECL_P (decl))
1995 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
1996 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
1997 && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
1999 tree new_decl = lookup_decl (decl, ctx);
2000 TREE_TYPE (new_decl)
2001 = remap_type (TREE_TYPE (decl), &ctx->cb);
2003 else if (DECL_SIZE (decl)
2004 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2006 tree decl2 = DECL_VALUE_EXPR (decl);
2007 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2008 decl2 = TREE_OPERAND (decl2, 0);
2009 gcc_assert (DECL_P (decl2));
2010 fixup_remapped_decl (decl2, ctx, false);
2011 fixup_remapped_decl (decl, ctx, true);
2013 else
2014 fixup_remapped_decl (decl, ctx, false);
2016 break;
2018 case OMP_CLAUSE_COPYPRIVATE:
2019 case OMP_CLAUSE_COPYIN:
2020 case OMP_CLAUSE_DEFAULT:
2021 case OMP_CLAUSE_IF:
2022 case OMP_CLAUSE_NUM_THREADS:
2023 case OMP_CLAUSE_NUM_TEAMS:
2024 case OMP_CLAUSE_THREAD_LIMIT:
2025 case OMP_CLAUSE_DEVICE:
2026 case OMP_CLAUSE_SCHEDULE:
2027 case OMP_CLAUSE_DIST_SCHEDULE:
2028 case OMP_CLAUSE_NOWAIT:
2029 case OMP_CLAUSE_ORDERED:
2030 case OMP_CLAUSE_COLLAPSE:
2031 case OMP_CLAUSE_UNTIED:
2032 case OMP_CLAUSE_FINAL:
2033 case OMP_CLAUSE_MERGEABLE:
2034 case OMP_CLAUSE_PROC_BIND:
2035 case OMP_CLAUSE_SAFELEN:
2036 case OMP_CLAUSE_ALIGNED:
2037 case OMP_CLAUSE_DEPEND:
2038 case OMP_CLAUSE__LOOPTEMP_:
2039 case OMP_CLAUSE_TO:
2040 case OMP_CLAUSE_FROM:
2041 case OMP_CLAUSE__CILK_FOR_COUNT_:
2042 case OMP_CLAUSE_ASYNC:
2043 case OMP_CLAUSE_WAIT:
2044 case OMP_CLAUSE_NUM_GANGS:
2045 case OMP_CLAUSE_NUM_WORKERS:
2046 case OMP_CLAUSE_VECTOR_LENGTH:
2047 case OMP_CLAUSE_GANG:
2048 case OMP_CLAUSE_WORKER:
2049 case OMP_CLAUSE_VECTOR:
2050 break;
2052 case OMP_CLAUSE_DEVICE_RESIDENT:
2053 case OMP_CLAUSE_USE_DEVICE:
2054 case OMP_CLAUSE__CACHE_:
2055 case OMP_CLAUSE_INDEPENDENT:
2056 case OMP_CLAUSE_AUTO:
2057 case OMP_CLAUSE_SEQ:
2058 sorry ("clause not supported yet");
2059 break;
2061 default:
2062 gcc_unreachable ();
2066 gcc_checking_assert (!scan_array_reductions
2067 || !is_gimple_omp_oacc (ctx->stmt));
2068 if (scan_array_reductions)
2069 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2070 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
2071 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2073 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2074 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2076 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2077 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2078 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2079 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2080 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
2081 scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
2084 /* Create a new name for omp child function. Returns an identifier. If
2085 IS_CILK_FOR is true then the suffix for the child function is
2086 "_cilk_for_fn." */
2088 static tree
2089 create_omp_child_function_name (bool task_copy, bool is_cilk_for)
2091 if (is_cilk_for)
2092 return clone_function_name (current_function_decl, "_cilk_for_fn");
2093 return clone_function_name (current_function_decl,
2094 task_copy ? "_omp_cpyfn" : "_omp_fn");
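/* Illustration: for a parallel region in a function "foo", this produces
   a child-function identifier along the lines of "foo._omp_fn.0";
   clone_function_name appends the suffix plus a sequence number.  */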
2097 /* Returns the type of the induction variable for the child function for
2098 _Cilk_for and the types for the __high and __low variables based on TYPE. */
2100 static tree
2101 cilk_for_check_loop_diff_type (tree type)
2103 if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
2105 if (TYPE_UNSIGNED (type))
2106 return uint32_type_node;
2107 else
2108 return integer_type_node;
2110 else
2112 if (TYPE_UNSIGNED (type))
2113 return uint64_type_node;
2114 else
2115 return long_long_integer_type_node;
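/* For instance, an unsigned induction type of 32 bits or fewer maps to
   uint32_type_node, while anything wider maps to the 64-bit variants;
   only the precision and signedness of TYPE matter here.  */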
2119 /* Build a decl for the omp child function. It will not contain a
2120 body yet, just the bare decl. */
2122 static void
2123 create_omp_child_function (omp_context *ctx, bool task_copy)
2125 tree decl, type, name, t;
2127 tree cilk_for_count
2128 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2129 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2130 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
2131 tree cilk_var_type = NULL_TREE;
2133 name = create_omp_child_function_name (task_copy,
2134 cilk_for_count != NULL_TREE);
2135 if (task_copy)
2136 type = build_function_type_list (void_type_node, ptr_type_node,
2137 ptr_type_node, NULL_TREE);
2138 else if (cilk_for_count)
2140 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
2141 cilk_var_type = cilk_for_check_loop_diff_type (type);
2142 type = build_function_type_list (void_type_node, ptr_type_node,
2143 cilk_var_type, cilk_var_type, NULL_TREE);
2145 else
2146 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
2148 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
2150 gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
2151 || !task_copy);
2152 if (!task_copy)
2153 ctx->cb.dst_fn = decl;
2154 else
2155 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
2157 TREE_STATIC (decl) = 1;
2158 TREE_USED (decl) = 1;
2159 DECL_ARTIFICIAL (decl) = 1;
2160 DECL_IGNORED_P (decl) = 0;
2161 TREE_PUBLIC (decl) = 0;
2162 DECL_UNINLINABLE (decl) = 1;
2163 DECL_EXTERNAL (decl) = 0;
2164 DECL_CONTEXT (decl) = NULL_TREE;
2165 DECL_INITIAL (decl) = make_node (BLOCK);
2166 if (cgraph_node::get (current_function_decl)->offloadable)
2167 cgraph_node::get_create (decl)->offloadable = 1;
2168 else
2170 omp_context *octx;
2171 for (octx = ctx; octx; octx = octx->outer)
2172 if (is_gimple_omp_offloaded (octx->stmt))
2174 cgraph_node::get_create (decl)->offloadable = 1;
2175 #ifdef ENABLE_OFFLOADING
2176 g->have_offload = true;
2177 #endif
2178 break;
2182 if (cgraph_node::get_create (decl)->offloadable
2183 && !lookup_attribute ("omp declare target",
2184 DECL_ATTRIBUTES (current_function_decl)))
2185 DECL_ATTRIBUTES (decl)
2186 = tree_cons (get_identifier ("omp target entrypoint"),
2187 NULL_TREE, DECL_ATTRIBUTES (decl));
2189 t = build_decl (DECL_SOURCE_LOCATION (decl),
2190 RESULT_DECL, NULL_TREE, void_type_node);
2191 DECL_ARTIFICIAL (t) = 1;
2192 DECL_IGNORED_P (t) = 1;
2193 DECL_CONTEXT (t) = decl;
2194 DECL_RESULT (decl) = t;
2196 /* _Cilk_for's child function requires two extra parameters called
2197 __low and __high that are set by the Cilk runtime when it calls this
2198 function. */
2199 if (cilk_for_count)
2201 t = build_decl (DECL_SOURCE_LOCATION (decl),
2202 PARM_DECL, get_identifier ("__high"), cilk_var_type);
2203 DECL_ARTIFICIAL (t) = 1;
2204 DECL_NAMELESS (t) = 1;
2205 DECL_ARG_TYPE (t) = ptr_type_node;
2206 DECL_CONTEXT (t) = current_function_decl;
2207 TREE_USED (t) = 1;
2208 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2209 DECL_ARGUMENTS (decl) = t;
2211 t = build_decl (DECL_SOURCE_LOCATION (decl),
2212 PARM_DECL, get_identifier ("__low"), cilk_var_type);
2213 DECL_ARTIFICIAL (t) = 1;
2214 DECL_NAMELESS (t) = 1;
2215 DECL_ARG_TYPE (t) = ptr_type_node;
2216 DECL_CONTEXT (t) = current_function_decl;
2217 TREE_USED (t) = 1;
2218 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2219 DECL_ARGUMENTS (decl) = t;
2222 tree data_name = get_identifier (".omp_data_i");
2223 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
2224 ptr_type_node);
2225 DECL_ARTIFICIAL (t) = 1;
2226 DECL_NAMELESS (t) = 1;
2227 DECL_ARG_TYPE (t) = ptr_type_node;
2228 DECL_CONTEXT (t) = current_function_decl;
2229 TREE_USED (t) = 1;
2230 if (cilk_for_count)
2231 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2232 DECL_ARGUMENTS (decl) = t;
2233 if (!task_copy)
2234 ctx->receiver_decl = t;
2235 else
2237 t = build_decl (DECL_SOURCE_LOCATION (decl),
2238 PARM_DECL, get_identifier (".omp_data_o"),
2239 ptr_type_node);
2240 DECL_ARTIFICIAL (t) = 1;
2241 DECL_NAMELESS (t) = 1;
2242 DECL_ARG_TYPE (t) = ptr_type_node;
2243 DECL_CONTEXT (t) = current_function_decl;
2244 TREE_USED (t) = 1;
2245 TREE_ADDRESSABLE (t) = 1;
2246 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2247 DECL_ARGUMENTS (decl) = t;
2250 /* Allocate memory for the function structure. The call to
2251 allocate_struct_function clobbers CFUN, so we need to restore
2252 it afterward. */
2253 push_struct_function (decl);
2254 cfun->function_end_locus = gimple_location (ctx->stmt);
2255 pop_cfun ();
2258 /* Callback for walk_gimple_seq. Check whether a combined parallel
2259 contains an OMP_FOR for which gimple_omp_for_combined_into_p is true. */
2261 static tree
2262 find_combined_for (gimple_stmt_iterator *gsi_p,
2263 bool *handled_ops_p,
2264 struct walk_stmt_info *wi)
2266 gimple stmt = gsi_stmt (*gsi_p);
2268 *handled_ops_p = true;
2269 switch (gimple_code (stmt))
2271 WALK_SUBSTMTS;
2273 case GIMPLE_OMP_FOR:
2274 if (gimple_omp_for_combined_into_p (stmt)
2275 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2277 wi->info = stmt;
2278 return integer_zero_node;
2280 break;
2281 default:
2282 break;
2284 return NULL;
2287 /* Scan an OpenMP parallel directive. */
2289 static void
2290 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2292 omp_context *ctx;
2293 tree name;
2294 gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
2296 /* Ignore parallel directives with empty bodies, unless there
2297 are copyin clauses. */
2298 if (optimize > 0
2299 && empty_body_p (gimple_omp_body (stmt))
2300 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2301 OMP_CLAUSE_COPYIN) == NULL)
2303 gsi_replace (gsi, gimple_build_nop (), false);
2304 return;
2307 if (gimple_omp_parallel_combined_p (stmt))
2309 struct walk_stmt_info wi;
2311 memset (&wi, 0, sizeof (wi));
2312 wi.val_only = true;
2313 walk_gimple_seq (gimple_omp_body (stmt),
2314 find_combined_for, NULL, &wi);
2315 if (wi.info)
2317 gomp_for *for_stmt = as_a <gomp_for *> ((gimple) wi.info);
2318 struct omp_for_data fd;
2319 extract_omp_for_data (for_stmt, &fd, NULL);
2320 /* We need two temporaries with fd.loop.v type (istart/iend)
2321 and then (fd.collapse - 1) temporaries with the same
2322 type for count2 ... countN-1 vars if not constant. */
2323 size_t count = 2, i;
2324 tree type = fd.iter_type;
2325 if (fd.collapse > 1
2326 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2327 count += fd.collapse - 1;
2328 for (i = 0; i < count; i++)
2330 tree temp = create_tmp_var (type);
2331 tree c = build_omp_clause (UNKNOWN_LOCATION,
2332 OMP_CLAUSE__LOOPTEMP_);
2333 insert_decl_map (&outer_ctx->cb, temp, temp);
2334 OMP_CLAUSE_DECL (c) = temp;
2335 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2336 gimple_omp_parallel_set_clauses (stmt, c);
2341 ctx = new_omp_context (stmt, outer_ctx);
2342 taskreg_contexts.safe_push (ctx);
2343 if (taskreg_nesting_level > 1)
2344 ctx->is_nested = true;
2345 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2346 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2347 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2348 name = create_tmp_var_name (".omp_data_s");
2349 name = build_decl (gimple_location (stmt),
2350 TYPE_DECL, name, ctx->record_type);
2351 DECL_ARTIFICIAL (name) = 1;
2352 DECL_NAMELESS (name) = 1;
2353 TYPE_NAME (ctx->record_type) = name;
2354 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2355 create_omp_child_function (ctx, false);
2356 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2358 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2359 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2361 if (TYPE_FIELDS (ctx->record_type) == NULL)
2362 ctx->record_type = ctx->receiver_decl = NULL;
2365 /* Scan an OpenMP task directive. */
2367 static void
2368 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2370 omp_context *ctx;
2371 tree name, t;
2372 gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
2374 /* Ignore task directives with empty bodies. */
2375 if (optimize > 0
2376 && empty_body_p (gimple_omp_body (stmt)))
2378 gsi_replace (gsi, gimple_build_nop (), false);
2379 return;
2382 ctx = new_omp_context (stmt, outer_ctx);
2383 taskreg_contexts.safe_push (ctx);
2384 if (taskreg_nesting_level > 1)
2385 ctx->is_nested = true;
2386 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2387 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2388 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2389 name = create_tmp_var_name (".omp_data_s");
2390 name = build_decl (gimple_location (stmt),
2391 TYPE_DECL, name, ctx->record_type);
2392 DECL_ARTIFICIAL (name) = 1;
2393 DECL_NAMELESS (name) = 1;
2394 TYPE_NAME (ctx->record_type) = name;
2395 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2396 create_omp_child_function (ctx, false);
2397 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2399 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2401 if (ctx->srecord_type)
2403 name = create_tmp_var_name (".omp_data_a");
2404 name = build_decl (gimple_location (stmt),
2405 TYPE_DECL, name, ctx->srecord_type);
2406 DECL_ARTIFICIAL (name) = 1;
2407 DECL_NAMELESS (name) = 1;
2408 TYPE_NAME (ctx->srecord_type) = name;
2409 TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
2410 create_omp_child_function (ctx, true);
2413 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2415 if (TYPE_FIELDS (ctx->record_type) == NULL)
2417 ctx->record_type = ctx->receiver_decl = NULL;
2418 t = build_int_cst (long_integer_type_node, 0);
2419 gimple_omp_task_set_arg_size (stmt, t);
2420 t = build_int_cst (long_integer_type_node, 1);
2421 gimple_omp_task_set_arg_align (stmt, t);
2426 /* If any decls have been made addressable during scan_omp,
2427 adjust their fields if needed, and lay out the record types
2428 of parallel/task constructs. */
2430 static void
2431 finish_taskreg_scan (omp_context *ctx)
2433 if (ctx->record_type == NULL_TREE)
2434 return;
2436 /* If any task_shared_vars were needed, check all
2437 OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
2438 statements to see whether use_pointer_for_field has changed
2439 because of that; if it did, update the field types now. */
2440 if (task_shared_vars)
2442 tree c;
2444 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2445 c; c = OMP_CLAUSE_CHAIN (c))
2446 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2448 tree decl = OMP_CLAUSE_DECL (c);
2450 /* Global variables don't need to be copied,
2451 the receiver side will use them directly. */
2452 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2453 continue;
2454 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2455 || !use_pointer_for_field (decl, ctx))
2456 continue;
2457 tree field = lookup_field (decl, ctx);
2458 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2459 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2460 continue;
2461 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2462 TREE_THIS_VOLATILE (field) = 0;
2463 DECL_USER_ALIGN (field) = 0;
2464 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2465 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2466 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2467 if (ctx->srecord_type)
2469 tree sfield = lookup_sfield (decl, ctx);
2470 TREE_TYPE (sfield) = TREE_TYPE (field);
2471 TREE_THIS_VOLATILE (sfield) = 0;
2472 DECL_USER_ALIGN (sfield) = 0;
2473 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2474 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2475 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2480 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2482 layout_type (ctx->record_type);
2483 fixup_child_record_type (ctx);
2485 else
2487 location_t loc = gimple_location (ctx->stmt);
2488 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2489 /* Move VLA fields to the end. */
2490 p = &TYPE_FIELDS (ctx->record_type);
2491 while (*p)
2492 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2493 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2495 *q = *p;
2496 *p = TREE_CHAIN (*p);
2497 TREE_CHAIN (*q) = NULL_TREE;
2498 q = &TREE_CHAIN (*q);
2500 else
2501 p = &DECL_CHAIN (*p);
2502 *p = vla_fields;
2503 layout_type (ctx->record_type);
2504 fixup_child_record_type (ctx);
2505 if (ctx->srecord_type)
2506 layout_type (ctx->srecord_type);
2507 tree t = fold_convert_loc (loc, long_integer_type_node,
2508 TYPE_SIZE_UNIT (ctx->record_type));
2509 gimple_omp_task_set_arg_size (ctx->stmt, t);
2510 t = build_int_cst (long_integer_type_node,
2511 TYPE_ALIGN_UNIT (ctx->record_type));
2512 gimple_omp_task_set_arg_align (ctx->stmt, t);
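/* Return the innermost enclosing context whose statement is a
   GIMPLE_OMP_TARGET, asserting that one exists.  */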
2517 static omp_context *
2518 enclosing_target_ctx (omp_context *ctx)
2520 while (ctx != NULL
2521 && gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
2522 ctx = ctx->outer;
2523 gcc_assert (ctx != NULL);
2524 return ctx;
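/* Return true if STMT is an OpenACC offload region (parallel or kernels
   target) or an OpenACC loop.  */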
2527 static bool
2528 oacc_loop_or_target_p (gimple stmt)
2530 enum gimple_code outer_type = gimple_code (stmt);
2531 return ((outer_type == GIMPLE_OMP_TARGET
2532 && ((gimple_omp_target_kind (stmt)
2533 == GF_OMP_TARGET_KIND_OACC_PARALLEL)
2534 || (gimple_omp_target_kind (stmt)
2535 == GF_OMP_TARGET_KIND_OACC_KERNELS)))
2536 || (outer_type == GIMPLE_OMP_FOR
2537 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP));
2540 /* Scan a GIMPLE_OMP_FOR. */
2542 static void
2543 scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
2545 enum gimple_code outer_type = GIMPLE_ERROR_MARK;
2546 omp_context *ctx;
2547 size_t i;
2548 tree clauses = gimple_omp_for_clauses (stmt);
2550 if (outer_ctx)
2551 outer_type = gimple_code (outer_ctx->stmt);
2553 ctx = new_omp_context (stmt, outer_ctx);
2555 if (is_gimple_omp_oacc (stmt))
2557 if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
2558 ctx->gwv_this = outer_ctx->gwv_this;
2559 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2561 int val;
2562 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_GANG)
2563 val = MASK_GANG;
2564 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WORKER)
2565 val = MASK_WORKER;
2566 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR)
2567 val = MASK_VECTOR;
2568 else
2569 continue;
2570 ctx->gwv_this |= val;
2571 if (!outer_ctx)
2573 /* Skip; not nested inside a region. */
2574 continue;
2576 if (!oacc_loop_or_target_p (outer_ctx->stmt))
2578 /* Skip; not nested inside an OpenACC region. */
2579 continue;
2581 if (outer_type == GIMPLE_OMP_FOR)
2582 outer_ctx->gwv_below |= val;
2583 if (OMP_CLAUSE_OPERAND (c, 0) != NULL_TREE)
2585 omp_context *enclosing = enclosing_target_ctx (outer_ctx);
2586 if (gimple_omp_target_kind (enclosing->stmt)
2587 == GF_OMP_TARGET_KIND_OACC_PARALLEL)
2588 error_at (gimple_location (stmt),
2589 "no arguments allowed to gang, worker and vector clauses inside parallel");
2594 scan_sharing_clauses (clauses, ctx);
2596 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2597 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2599 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2600 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2601 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2602 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2604 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2606 if (is_gimple_omp_oacc (stmt))
2608 if (ctx->gwv_this & ctx->gwv_below)
2609 error_at (gimple_location (stmt),
2610 "gang, worker and vector may occur only once in a loop nest");
2611 else if (ctx->gwv_below != 0
2612 && ctx->gwv_this > ctx->gwv_below)
2613 error_at (gimple_location (stmt),
2614 "gang, worker and vector must occur in this order in a loop nest");
2615 if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
2616 outer_ctx->gwv_below |= ctx->gwv_below;
2620 /* Scan an OpenMP sections directive. */
2622 static void
2623 scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
2625 omp_context *ctx;
2627 ctx = new_omp_context (stmt, outer_ctx);
2628 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2629 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2632 /* Scan an OpenMP single directive. */
2634 static void
2635 scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
2637 omp_context *ctx;
2638 tree name;
2640 ctx = new_omp_context (stmt, outer_ctx);
2641 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2642 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2643 name = create_tmp_var_name (".omp_copy_s");
2644 name = build_decl (gimple_location (stmt),
2645 TYPE_DECL, name, ctx->record_type);
2646 TYPE_NAME (ctx->record_type) = name;
2648 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2649 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2651 if (TYPE_FIELDS (ctx->record_type) == NULL)
2652 ctx->record_type = NULL;
2653 else
2654 layout_type (ctx->record_type);
2657 /* Scan a GIMPLE_OMP_TARGET. */
2659 static void
2660 scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
2662 omp_context *ctx;
2663 tree name;
2664 bool offloaded = is_gimple_omp_offloaded (stmt);
2665 tree clauses = gimple_omp_target_clauses (stmt);
2667 ctx = new_omp_context (stmt, outer_ctx);
2668 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2669 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2670 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2671 name = create_tmp_var_name (".omp_data_t");
2672 name = build_decl (gimple_location (stmt),
2673 TYPE_DECL, name, ctx->record_type);
2674 DECL_ARTIFICIAL (name) = 1;
2675 DECL_NAMELESS (name) = 1;
2676 TYPE_NAME (ctx->record_type) = name;
2677 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2678 if (offloaded)
2680 if (is_gimple_omp_oacc (stmt))
2681 ctx->reduction_map = splay_tree_new (splay_tree_compare_pointers,
2682 0, 0);
2684 create_omp_child_function (ctx, false);
2685 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2688 if (is_gimple_omp_oacc (stmt))
2690 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2692 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_GANGS)
2693 ctx->gwv_this |= MASK_GANG;
2694 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_WORKERS)
2695 ctx->gwv_this |= MASK_WORKER;
2696 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR_LENGTH)
2697 ctx->gwv_this |= MASK_VECTOR;
2701 scan_sharing_clauses (clauses, ctx);
2702 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2704 if (TYPE_FIELDS (ctx->record_type) == NULL)
2705 ctx->record_type = ctx->receiver_decl = NULL;
2706 else
2708 TYPE_FIELDS (ctx->record_type)
2709 = nreverse (TYPE_FIELDS (ctx->record_type));
2710 #ifdef ENABLE_CHECKING
2711 tree field;
2712 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2713 for (field = TYPE_FIELDS (ctx->record_type);
2714 field;
2715 field = DECL_CHAIN (field))
2716 gcc_assert (DECL_ALIGN (field) == align);
2717 #endif
2718 layout_type (ctx->record_type);
2719 if (offloaded)
2720 fixup_child_record_type (ctx);
2724 /* Scan an OpenMP teams directive. */
2726 static void
2727 scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
2729 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2730 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2731 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2734 /* Check nesting restrictions. */
2735 static bool
2736 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2738 /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
2739 inside an OpenACC CTX. */
2740 if (!(is_gimple_omp (stmt)
2741 && is_gimple_omp_oacc (stmt)))
2743 for (omp_context *ctx_ = ctx; ctx_ != NULL; ctx_ = ctx_->outer)
2744 if (is_gimple_omp (ctx_->stmt)
2745 && is_gimple_omp_oacc (ctx_->stmt))
2747 error_at (gimple_location (stmt),
2748 "non-OpenACC construct inside of OpenACC region");
2749 return false;
2753 if (ctx != NULL)
2755 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2756 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2758 error_at (gimple_location (stmt),
2759 "OpenMP constructs may not be nested inside simd region");
2760 return false;
2762 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2764 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2765 || (gimple_omp_for_kind (stmt)
2766 != GF_OMP_FOR_KIND_DISTRIBUTE))
2767 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2769 error_at (gimple_location (stmt),
2770 "only distribute or parallel constructs are allowed to "
2771 "be closely nested inside teams construct");
2772 return false;
2776 switch (gimple_code (stmt))
2778 case GIMPLE_OMP_FOR:
2779 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2780 return true;
2781 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2783 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2785 error_at (gimple_location (stmt),
2786 "distribute construct must be closely nested inside "
2787 "teams construct");
2788 return false;
2790 return true;
2792 /* FALLTHRU */
2793 case GIMPLE_CALL:
2794 if (is_gimple_call (stmt)
2795 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2796 == BUILT_IN_GOMP_CANCEL
2797 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2798 == BUILT_IN_GOMP_CANCELLATION_POINT))
2800 const char *bad = NULL;
2801 const char *kind = NULL;
2802 if (ctx == NULL)
2804 error_at (gimple_location (stmt), "orphaned %qs construct",
2805 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2806 == BUILT_IN_GOMP_CANCEL
2807 ? "#pragma omp cancel"
2808 : "#pragma omp cancellation point");
2809 return false;
2811 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2812 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2813 : 0)
2815 case 1:
2816 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2817 bad = "#pragma omp parallel";
2818 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2819 == BUILT_IN_GOMP_CANCEL
2820 && !integer_zerop (gimple_call_arg (stmt, 1)))
2821 ctx->cancellable = true;
2822 kind = "parallel";
2823 break;
2824 case 2:
2825 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2826 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2827 bad = "#pragma omp for";
2828 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2829 == BUILT_IN_GOMP_CANCEL
2830 && !integer_zerop (gimple_call_arg (stmt, 1)))
2832 ctx->cancellable = true;
2833 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2834 OMP_CLAUSE_NOWAIT))
2835 warning_at (gimple_location (stmt), 0,
2836 "%<#pragma omp cancel for%> inside "
2837 "%<nowait%> for construct");
2838 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2839 OMP_CLAUSE_ORDERED))
2840 warning_at (gimple_location (stmt), 0,
2841 "%<#pragma omp cancel for%> inside "
2842 "%<ordered%> for construct");
2844 kind = "for";
2845 break;
2846 case 4:
2847 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2848 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2849 bad = "#pragma omp sections";
2850 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2851 == BUILT_IN_GOMP_CANCEL
2852 && !integer_zerop (gimple_call_arg (stmt, 1)))
2854 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2856 ctx->cancellable = true;
2857 if (find_omp_clause (gimple_omp_sections_clauses
2858 (ctx->stmt),
2859 OMP_CLAUSE_NOWAIT))
2860 warning_at (gimple_location (stmt), 0,
2861 "%<#pragma omp cancel sections%> inside "
2862 "%<nowait%> sections construct");
2864 else
2866 gcc_assert (ctx->outer
2867 && gimple_code (ctx->outer->stmt)
2868 == GIMPLE_OMP_SECTIONS);
2869 ctx->outer->cancellable = true;
2870 if (find_omp_clause (gimple_omp_sections_clauses
2871 (ctx->outer->stmt),
2872 OMP_CLAUSE_NOWAIT))
2873 warning_at (gimple_location (stmt), 0,
2874 "%<#pragma omp cancel sections%> inside "
2875 "%<nowait%> sections construct");
2878 kind = "sections";
2879 break;
2880 case 8:
2881 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2882 bad = "#pragma omp task";
2883 else
2884 ctx->cancellable = true;
2885 kind = "taskgroup";
2886 break;
2887 default:
2888 error_at (gimple_location (stmt), "invalid arguments");
2889 return false;
2891 if (bad)
2893 error_at (gimple_location (stmt),
2894 "%<%s %s%> construct not closely nested inside of %qs",
2895 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2896 == BUILT_IN_GOMP_CANCEL
2897 ? "#pragma omp cancel"
2898 : "#pragma omp cancellation point", kind, bad);
2899 return false;
2902 /* FALLTHRU */
2903 case GIMPLE_OMP_SECTIONS:
2904 case GIMPLE_OMP_SINGLE:
2905 for (; ctx != NULL; ctx = ctx->outer)
2906 switch (gimple_code (ctx->stmt))
2908 case GIMPLE_OMP_FOR:
2909 case GIMPLE_OMP_SECTIONS:
2910 case GIMPLE_OMP_SINGLE:
2911 case GIMPLE_OMP_ORDERED:
2912 case GIMPLE_OMP_MASTER:
2913 case GIMPLE_OMP_TASK:
2914 case GIMPLE_OMP_CRITICAL:
2915 if (is_gimple_call (stmt))
2917 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2918 != BUILT_IN_GOMP_BARRIER)
2919 return true;
2920 error_at (gimple_location (stmt),
2921 "barrier region may not be closely nested inside "
2922 "of work-sharing, critical, ordered, master or "
2923 "explicit task region");
2924 return false;
2926 error_at (gimple_location (stmt),
2927 "work-sharing region may not be closely nested inside "
2928 "of work-sharing, critical, ordered, master or explicit "
2929 "task region");
2930 return false;
2931 case GIMPLE_OMP_PARALLEL:
2932 return true;
2933 default:
2934 break;
2936 break;
2937 case GIMPLE_OMP_MASTER:
2938 for (; ctx != NULL; ctx = ctx->outer)
2939 switch (gimple_code (ctx->stmt))
2941 case GIMPLE_OMP_FOR:
2942 case GIMPLE_OMP_SECTIONS:
2943 case GIMPLE_OMP_SINGLE:
2944 case GIMPLE_OMP_TASK:
2945 error_at (gimple_location (stmt),
2946 "master region may not be closely nested inside "
2947 "of work-sharing or explicit task region");
2948 return false;
2949 case GIMPLE_OMP_PARALLEL:
2950 return true;
2951 default:
2952 break;
2954 break;
2955 case GIMPLE_OMP_ORDERED:
2956 for (; ctx != NULL; ctx = ctx->outer)
2957 switch (gimple_code (ctx->stmt))
2959 case GIMPLE_OMP_CRITICAL:
2960 case GIMPLE_OMP_TASK:
2961 error_at (gimple_location (stmt),
2962 "ordered region may not be closely nested inside "
2963 "of critical or explicit task region");
2964 return false;
2965 case GIMPLE_OMP_FOR:
2966 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2967 OMP_CLAUSE_ORDERED) == NULL)
2969 error_at (gimple_location (stmt),
2970 "ordered region must be closely nested inside "
2971 "a loop region with an ordered clause");
2972 return false;
2974 return true;
2975 case GIMPLE_OMP_PARALLEL:
2976 error_at (gimple_location (stmt),
2977 "ordered region must be closely nested inside "
2978 "a loop region with an ordered clause");
2979 return false;
2980 default:
2981 break;
2983 break;
2984 case GIMPLE_OMP_CRITICAL:
2986 tree this_stmt_name
2987 = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
2988 for (; ctx != NULL; ctx = ctx->outer)
2989 if (gomp_critical *other_crit
2990 = dyn_cast <gomp_critical *> (ctx->stmt))
2991 if (this_stmt_name == gimple_omp_critical_name (other_crit))
2993 error_at (gimple_location (stmt),
2994 "critical region may not be nested inside a critical "
2995 "region with the same name");
2996 return false;
2999 break;
3000 case GIMPLE_OMP_TEAMS:
3001 if (ctx == NULL
3002 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
3003 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
3005 error_at (gimple_location (stmt),
3006 "teams construct not closely nested inside of target "
3007 "region");
3008 return false;
3010 break;
3011 case GIMPLE_OMP_TARGET:
3012 for (; ctx != NULL; ctx = ctx->outer)
3014 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
3016 if (is_gimple_omp (stmt)
3017 && is_gimple_omp_oacc (stmt)
3018 && is_gimple_omp (ctx->stmt))
3020 error_at (gimple_location (stmt),
3021 "OpenACC construct inside of non-OpenACC region");
3022 return false;
3024 continue;
3027 const char *stmt_name, *ctx_stmt_name;
3028 switch (gimple_omp_target_kind (stmt))
3030 case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
3031 case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
3032 case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
3033 case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
3034 case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
3035 case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
3036 case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
3037 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: stmt_name = "enter/exit data"; break;
3038 default: gcc_unreachable ();
3040 switch (gimple_omp_target_kind (ctx->stmt))
3042 case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
3043 case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
3044 case GF_OMP_TARGET_KIND_OACC_PARALLEL: ctx_stmt_name = "parallel"; break;
3045 case GF_OMP_TARGET_KIND_OACC_KERNELS: ctx_stmt_name = "kernels"; break;
3046 case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
3047 default: gcc_unreachable ();
3050 /* OpenACC/OpenMP mismatch? */
3051 if (is_gimple_omp_oacc (stmt)
3052 != is_gimple_omp_oacc (ctx->stmt))
3054 error_at (gimple_location (stmt),
3055 "%s %s construct inside of %s %s region",
3056 (is_gimple_omp_oacc (stmt)
3057 ? "OpenACC" : "OpenMP"), stmt_name,
3058 (is_gimple_omp_oacc (ctx->stmt)
3059 ? "OpenACC" : "OpenMP"), ctx_stmt_name);
3060 return false;
3062 if (is_gimple_omp_offloaded (ctx->stmt))
3064 /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
3065 if (is_gimple_omp_oacc (ctx->stmt))
3067 error_at (gimple_location (stmt),
3068 "%s construct inside of %s region",
3069 stmt_name, ctx_stmt_name);
3070 return false;
3072 else
3074 gcc_checking_assert (!is_gimple_omp_oacc (stmt));
3075 warning_at (gimple_location (stmt), 0,
3076 "%s construct inside of %s region",
3077 stmt_name, ctx_stmt_name);
3081 break;
3082 default:
3083 break;
3085 return true;
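/* Worked example of a violation caught above: a "#pragma omp single"
   placed directly inside "#pragma omp teams" reaches the GIMPLE_OMP_TEAMS
   check with CTX pointing at the teams region and is rejected, because
   only distribute and parallel constructs may be closely nested there.  */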
3089 /* Helper function for scan_omp.
3091 Callback for walk_tree or operators in walk_gimple_stmt used to
3092 scan for OMP directives in TP. */
3094 static tree
3095 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
3097 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
3098 omp_context *ctx = (omp_context *) wi->info;
3099 tree t = *tp;
3101 switch (TREE_CODE (t))
3103 case VAR_DECL:
3104 case PARM_DECL:
3105 case LABEL_DECL:
3106 case RESULT_DECL:
3107 if (ctx)
3108 *tp = remap_decl (t, &ctx->cb);
3109 break;
3111 default:
3112 if (ctx && TYPE_P (t))
3113 *tp = remap_type (t, &ctx->cb);
3114 else if (!DECL_P (t))
3116 *walk_subtrees = 1;
3117 if (ctx)
3119 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
3120 if (tem != TREE_TYPE (t))
3122 if (TREE_CODE (t) == INTEGER_CST)
3123 *tp = wide_int_to_tree (tem, t);
3124 else
3125 TREE_TYPE (t) = tem;
3129 break;
3132 return NULL_TREE;
3135 /* Return true if FNDECL is a setjmp or a longjmp. */
3137 static bool
3138 setjmp_or_longjmp_p (const_tree fndecl)
3140 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3141 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
3142 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
3143 return true;
3145 tree declname = DECL_NAME (fndecl);
3146 if (!declname)
3147 return false;
3148 const char *name = IDENTIFIER_POINTER (declname);
3149 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
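/* Note that this deliberately also matches plain calls named "setjmp" or
   "longjmp" that were not recognized as the builtins, so a hand-declared
   setjmp is still caught.  */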
3153 /* Helper function for scan_omp.
3155 Callback for walk_gimple_stmt used to scan for OMP directives in
3156 the current statement in GSI. */
3158 static tree
3159 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
3160 struct walk_stmt_info *wi)
3162 gimple stmt = gsi_stmt (*gsi);
3163 omp_context *ctx = (omp_context *) wi->info;
3165 if (gimple_has_location (stmt))
3166 input_location = gimple_location (stmt);
3168 /* Check the nesting restrictions. */
3169 bool remove = false;
3170 if (is_gimple_omp (stmt))
3171 remove = !check_omp_nesting_restrictions (stmt, ctx);
3172 else if (is_gimple_call (stmt))
3174 tree fndecl = gimple_call_fndecl (stmt);
3175 if (fndecl)
3177 if (setjmp_or_longjmp_p (fndecl)
3178 && ctx
3179 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3180 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3182 remove = true;
3183 error_at (gimple_location (stmt),
3184 "setjmp/longjmp inside simd construct");
3186 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3187 switch (DECL_FUNCTION_CODE (fndecl))
3189 case BUILT_IN_GOMP_BARRIER:
3190 case BUILT_IN_GOMP_CANCEL:
3191 case BUILT_IN_GOMP_CANCELLATION_POINT:
3192 case BUILT_IN_GOMP_TASKYIELD:
3193 case BUILT_IN_GOMP_TASKWAIT:
3194 case BUILT_IN_GOMP_TASKGROUP_START:
3195 case BUILT_IN_GOMP_TASKGROUP_END:
3196 remove = !check_omp_nesting_restrictions (stmt, ctx);
3197 break;
3198 default:
3199 break;
3203 if (remove)
3205 stmt = gimple_build_nop ();
3206 gsi_replace (gsi, stmt, false);
3209 *handled_ops_p = true;
3211 switch (gimple_code (stmt))
3213 case GIMPLE_OMP_PARALLEL:
3214 taskreg_nesting_level++;
3215 scan_omp_parallel (gsi, ctx);
3216 taskreg_nesting_level--;
3217 break;
3219 case GIMPLE_OMP_TASK:
3220 taskreg_nesting_level++;
3221 scan_omp_task (gsi, ctx);
3222 taskreg_nesting_level--;
3223 break;
3225 case GIMPLE_OMP_FOR:
3226 scan_omp_for (as_a <gomp_for *> (stmt), ctx);
3227 break;
3229 case GIMPLE_OMP_SECTIONS:
3230 scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
3231 break;
3233 case GIMPLE_OMP_SINGLE:
3234 scan_omp_single (as_a <gomp_single *> (stmt), ctx);
3235 break;
3237 case GIMPLE_OMP_SECTION:
3238 case GIMPLE_OMP_MASTER:
3239 case GIMPLE_OMP_TASKGROUP:
3240 case GIMPLE_OMP_ORDERED:
3241 case GIMPLE_OMP_CRITICAL:
3242 ctx = new_omp_context (stmt, ctx);
3243 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3244 break;
3246 case GIMPLE_OMP_TARGET:
3247 scan_omp_target (as_a <gomp_target *> (stmt), ctx);
3248 break;
3250 case GIMPLE_OMP_TEAMS:
3251 scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
3252 break;
3254 case GIMPLE_BIND:
3256 tree var;
3258 *handled_ops_p = false;
3259 if (ctx)
3260 for (var = gimple_bind_vars (as_a <gbind *> (stmt));
3261 var ;
3262 var = DECL_CHAIN (var))
3263 insert_decl_map (&ctx->cb, var, var);
3265 break;
3266 default:
3267 *handled_ops_p = false;
3268 break;
3271 return NULL_TREE;
3275 /* Scan all the statements starting at the current statement. CTX
3276 contains context information about the OMP directives and
3277 clauses found during the scan. */
3279 static void
3280 scan_omp (gimple_seq *body_p, omp_context *ctx)
3282 location_t saved_location;
3283 struct walk_stmt_info wi;
3285 memset (&wi, 0, sizeof (wi));
3286 wi.info = ctx;
3287 wi.want_locations = true;
3289 saved_location = input_location;
3290 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
3291 input_location = saved_location;
3294 /* Re-gimplification and code generation routines. */
3296 /* Build a call to GOMP_barrier. */
3298 static gimple
3299 build_omp_barrier (tree lhs)
3301 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
3302 : BUILT_IN_GOMP_BARRIER);
3303 gcall *g = gimple_build_call (fndecl, 0);
3304 if (lhs)
3305 gimple_call_set_lhs (g, lhs);
3306 return g;
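/* The cancellable variant is chosen exactly when the caller supplies LHS:
   GOMP_barrier_cancel returns a flag saying whether cancellation was
   observed, whereas plain GOMP_barrier returns void.  */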
3309 /* If a context was created for STMT when it was scanned, return it. */
3311 static omp_context *
3312 maybe_lookup_ctx (gimple stmt)
3314 splay_tree_node n;
3315 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
3316 return n ? (omp_context *) n->value : NULL;
3320 /* Find the mapping for DECL in CTX or the immediately enclosing
3321 context that has a mapping for DECL.
3323 If CTX is a nested parallel directive, we may have to use the decl
3324 mappings created in CTX's parent context. Suppose that we have the
3325 following parallel nesting (variable UIDs shown for clarity):
3327 iD.1562 = 0;
3328 #omp parallel shared(iD.1562) -> outer parallel
3329 iD.1562 = iD.1562 + 1;
3331 #omp parallel shared (iD.1562) -> inner parallel
3332 iD.1562 = iD.1562 - 1;
3334 Each parallel structure will create a distinct .omp_data_s structure
3335 for copying iD.1562 in/out of the directive:
3337 outer parallel .omp_data_s.1.i -> iD.1562
3338 inner parallel .omp_data_s.2.i -> iD.1562
3340 A shared variable mapping will produce a copy-out operation before
3341 the parallel directive and a copy-in operation after it. So, in
3342 this case we would have:
3344 iD.1562 = 0;
3345 .omp_data_o.1.i = iD.1562;
3346 #omp parallel shared(iD.1562) -> outer parallel
3347 .omp_data_i.1 = &.omp_data_o.1
3348 .omp_data_i.1->i = .omp_data_i.1->i + 1;
3350 .omp_data_o.2.i = iD.1562; -> **
3351 #omp parallel shared(iD.1562) -> inner parallel
3352 .omp_data_i.2 = &.omp_data_o.2
3353 .omp_data_i.2->i = .omp_data_i.2->i - 1;
3356 ** This is a problem. The symbol iD.1562 cannot be referenced
3357 inside the body of the outer parallel region. But since we are
3358 emitting this copy operation while expanding the inner parallel
3359 directive, we need to access the CTX structure of the outer
3360 parallel directive to get the correct mapping:
3362 .omp_data_o.2.i = .omp_data_i.1->i
3364 Since there may be other workshare or parallel directives enclosing
3365 the parallel directive, it may be necessary to walk up the context
3366 parent chain. This is not a problem in general because nested
3367 parallelism happens only rarely. */
3369 static tree
3370 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3372 tree t;
3373 omp_context *up;
3375 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3376 t = maybe_lookup_decl (decl, up);
3378 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
3380 return t ? t : decl;
3384 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
3385 in outer contexts. */
3387 static tree
3388 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3390 tree t = NULL;
3391 omp_context *up;
3393 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3394 t = maybe_lookup_decl (decl, up);
3396 return t ? t : decl;
3400 /* Construct the initialization value for reduction CLAUSE. */
3402 tree
3403 omp_reduction_init (tree clause, tree type)
3405 location_t loc = OMP_CLAUSE_LOCATION (clause);
3406 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
3408 case PLUS_EXPR:
3409 case MINUS_EXPR:
3410 case BIT_IOR_EXPR:
3411 case BIT_XOR_EXPR:
3412 case TRUTH_OR_EXPR:
3413 case TRUTH_ORIF_EXPR:
3414 case TRUTH_XOR_EXPR:
3415 case NE_EXPR:
3416 return build_zero_cst (type);
3418 case MULT_EXPR:
3419 case TRUTH_AND_EXPR:
3420 case TRUTH_ANDIF_EXPR:
3421 case EQ_EXPR:
3422 return fold_convert_loc (loc, type, integer_one_node);
3424 case BIT_AND_EXPR:
3425 return fold_convert_loc (loc, type, integer_minus_one_node);
3427 case MAX_EXPR:
3428 if (SCALAR_FLOAT_TYPE_P (type))
3430 REAL_VALUE_TYPE max, min;
3431 if (HONOR_INFINITIES (type))
3433 real_inf (&max);
3434 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3436 else
3437 real_maxval (&min, 1, TYPE_MODE (type));
3438 return build_real (type, min);
3440 else
3442 gcc_assert (INTEGRAL_TYPE_P (type));
3443 return TYPE_MIN_VALUE (type);
3446 case MIN_EXPR:
3447 if (SCALAR_FLOAT_TYPE_P (type))
3449 REAL_VALUE_TYPE max;
3450 if (HONOR_INFINITIES (type))
3451 real_inf (&max);
3452 else
3453 real_maxval (&max, 0, TYPE_MODE (type));
3454 return build_real (type, max);
3456 else
3458 gcc_assert (INTEGRAL_TYPE_P (type));
3459 return TYPE_MAX_VALUE (type);
3462 default:
3463 gcc_unreachable ();
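/* Sample initializer values implied by the switch above: 0 for
   reduction(+:x), 1 for reduction(*:x), all-ones for reduction(&:x), and
   for reduction(max:x) on a float either -Inf or, when infinities are not
   honored, the most negative finite value.  */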
3467 /* Return alignment to be assumed for var in CLAUSE, which should be
3468 OMP_CLAUSE_ALIGNED. */
3470 static tree
3471 omp_clause_aligned_alignment (tree clause)
3473 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3474 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3476 /* Otherwise return implementation defined alignment. */
3477 unsigned int al = 1;
3478 machine_mode mode, vmode;
3479 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3480 if (vs)
3481 vs = 1 << floor_log2 (vs);
3482 static enum mode_class classes[]
3483 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3484 for (int i = 0; i < 4; i += 2)
3485 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3486 mode != VOIDmode;
3487 mode = GET_MODE_WIDER_MODE (mode))
3489 vmode = targetm.vectorize.preferred_simd_mode (mode);
3490 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3491 continue;
3492 while (vs
3493 && GET_MODE_SIZE (vmode) < vs
3494 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3495 vmode = GET_MODE_2XWIDER_MODE (vmode);
3497 tree type = lang_hooks.types.type_for_mode (mode, 1);
3498 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3499 continue;
3500 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3501 / GET_MODE_SIZE (mode));
3502 if (TYPE_MODE (type) != vmode)
3503 continue;
3504 if (TYPE_ALIGN_UNIT (type) > al)
3505 al = TYPE_ALIGN_UNIT (type);
3507 return build_int_cst (integer_type_node, al);
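/* Illustrative, target-dependent outcome: on a target whose preferred
   integer SIMD mode is 32 bytes wide (e.g. AVX), the loop above settles
   on a 32-byte vector type, so the assumed alignment is 32.  */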
3510 /* Return maximum possible vectorization factor for the target. */
3512 static int
3513 omp_max_vf (void)
3515 if (!optimize
3516 || optimize_debug
3517 || !flag_tree_loop_optimize
3518 || (!flag_tree_loop_vectorize
3519 && (global_options_set.x_flag_tree_loop_vectorize
3520 || global_options_set.x_flag_tree_vectorize)))
3521 return 1;
3523 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3524 if (vs)
3526 vs = 1 << floor_log2 (vs);
3527 return vs;
3529 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3530 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3531 return GET_MODE_NUNITS (vqimode);
3532 return 1;
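/* Rough numeric example: a target advertising 32- and 16-byte vector
   sizes returns the bitmask 48 from autovectorize_vector_sizes, and
   1 << floor_log2 (48) rounds that down to a maximum VF of 32.  */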
3535 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3536 privatization. */
3538 static bool
3539 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3540 tree &idx, tree &lane, tree &ivar, tree &lvar)
3542 if (max_vf == 0)
3544 max_vf = omp_max_vf ();
3545 if (max_vf > 1)
3547 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3548 OMP_CLAUSE_SAFELEN);
3549 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3550 max_vf = 1;
3551 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3552 max_vf) == -1)
3553 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3555 if (max_vf > 1)
3557 idx = create_tmp_var (unsigned_type_node);
3558 lane = create_tmp_var (unsigned_type_node);
3561 if (max_vf == 1)
3562 return false;
3564 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3565 tree avar = create_tmp_var_raw (atype);
3566 if (TREE_ADDRESSABLE (new_var))
3567 TREE_ADDRESSABLE (avar) = 1;
3568 DECL_ATTRIBUTES (avar)
3569 = tree_cons (get_identifier ("omp simd array"), NULL,
3570 DECL_ATTRIBUTES (avar));
3571 gimple_add_tmp_var (avar);
3572 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3573 NULL_TREE, NULL_TREE);
3574 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3575 NULL_TREE, NULL_TREE);
3576 if (DECL_P (new_var))
3578 SET_DECL_VALUE_EXPR (new_var, lvar);
3579 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3581 return true;
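/* Net effect, roughly: the privatized scalar becomes a MAX_VF-element
   "omp simd array"; IVAR indexes it by the per-lane IDX inside the loop
   body, while LVAR, installed as NEW_VAR's value expression, selects the
   LANE element used for the scalar value after the loop.  */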
3584 /* Helper function of lower_rec_input_clauses. For a reference used in
3585 a simd reduction, create the underlying variable it will reference. */
3587 static void
3588 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3590 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3591 if (TREE_CONSTANT (z))
3593 const char *name = NULL;
3594 if (DECL_NAME (new_vard))
3595 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3597 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3598 gimple_add_tmp_var (z);
3599 TREE_ADDRESSABLE (z) = 1;
3600 z = build_fold_addr_expr_loc (loc, z);
3601 gimplify_assign (new_vard, z, ilist);
3605 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3606 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3607 private variables. Initialization statements go in ILIST, while calls
3608 to destructors go in DLIST. */
3610 static void
3611 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3612 omp_context *ctx, struct omp_for_data *fd)
3614 tree c, dtor, copyin_seq, x, ptr;
3615 bool copyin_by_ref = false;
3616 bool lastprivate_firstprivate = false;
3617 bool reduction_omp_orig_ref = false;
3618 int pass;
3619 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3620 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3621 int max_vf = 0;
3622 tree lane = NULL_TREE, idx = NULL_TREE;
3623 tree ivar = NULL_TREE, lvar = NULL_TREE;
3624 gimple_seq llist[2] = { NULL, NULL };
3626 copyin_seq = NULL;
3628 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3629 with data sharing clauses referencing variable sized vars. That
3630 is unnecessarily hard to support and very unlikely to result in
3631 vectorized code anyway. */
3632 if (is_simd)
3633 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3634 switch (OMP_CLAUSE_CODE (c))
3636 case OMP_CLAUSE_LINEAR:
3637 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3638 max_vf = 1;
3639 /* FALLTHRU */
3640 case OMP_CLAUSE_REDUCTION:
3641 case OMP_CLAUSE_PRIVATE:
3642 case OMP_CLAUSE_FIRSTPRIVATE:
3643 case OMP_CLAUSE_LASTPRIVATE:
3644 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3645 max_vf = 1;
3646 break;
3647 default:
3648 continue;
3651 /* Do all the fixed sized types in the first pass, and the variable sized
3652 types in the second pass. This makes sure that the scalar arguments to
3653 the variable sized types are processed before we use them in the
3654 variable sized operations. */
3655 for (pass = 0; pass < 2; ++pass)
3657 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3659 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3660 tree var, new_var;
3661 bool by_ref;
3662 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3664 switch (c_kind)
3666 case OMP_CLAUSE_PRIVATE:
3667 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3668 continue;
3669 break;
3670 case OMP_CLAUSE_SHARED:
3671 /* Ignore shared directives in teams construct. */
3672 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3673 continue;
3674 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3676 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3677 continue;
3679 case OMP_CLAUSE_FIRSTPRIVATE:
3680 case OMP_CLAUSE_COPYIN:
3681 case OMP_CLAUSE_LINEAR:
3682 break;
3683 case OMP_CLAUSE_REDUCTION:
3684 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3685 reduction_omp_orig_ref = true;
3686 break;
3687 case OMP_CLAUSE__LOOPTEMP_:
3688 /* Handle _looptemp_ clauses only on parallel. */
3689 if (fd)
3690 continue;
3691 break;
3692 case OMP_CLAUSE_LASTPRIVATE:
3693 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3695 lastprivate_firstprivate = true;
3696 if (pass != 0)
3697 continue;
3699 /* Even without corresponding firstprivate, if
3700 decl is Fortran allocatable, it needs outer var
3701 reference. */
3702 else if (pass == 0
3703 && lang_hooks.decls.omp_private_outer_ref
3704 (OMP_CLAUSE_DECL (c)))
3705 lastprivate_firstprivate = true;
3706 break;
3707 case OMP_CLAUSE_ALIGNED:
3708 if (pass == 0)
3709 continue;
3710 var = OMP_CLAUSE_DECL (c);
3711 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3712 && !is_global_var (var))
3714 new_var = maybe_lookup_decl (var, ctx);
3715 if (new_var == NULL_TREE)
3716 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3717 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3718 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3719 omp_clause_aligned_alignment (c));
3720 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3721 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3722 gimplify_and_add (x, ilist);
3724 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3725 && is_global_var (var))
3727 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3728 new_var = lookup_decl (var, ctx);
3729 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3730 t = build_fold_addr_expr_loc (clause_loc, t);
3731 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3732 t = build_call_expr_loc (clause_loc, t2, 2, t,
3733 omp_clause_aligned_alignment (c));
3734 t = fold_convert_loc (clause_loc, ptype, t);
3735 x = create_tmp_var (ptype);
3736 t = build2 (MODIFY_EXPR, ptype, x, t);
3737 gimplify_and_add (t, ilist);
3738 t = build_simple_mem_ref_loc (clause_loc, x);
3739 SET_DECL_VALUE_EXPR (new_var, t);
3740 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3742 continue;
3743 default:
3744 continue;
3747 new_var = var = OMP_CLAUSE_DECL (c);
3748 if (c_kind != OMP_CLAUSE_COPYIN)
3749 new_var = lookup_decl (var, ctx);
3751 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3753 if (pass != 0)
3754 continue;
3756 else if (is_variable_sized (var))
3758 /* For variable sized types, we need to allocate the
3759 actual storage here. Call alloca and store the
3760 result in the pointer decl that we created elsewhere. */
3761 if (pass == 0)
3762 continue;
3764 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3766 gcall *stmt;
3767 tree tmp, atmp;
3769 ptr = DECL_VALUE_EXPR (new_var);
3770 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3771 ptr = TREE_OPERAND (ptr, 0);
3772 gcc_assert (DECL_P (ptr));
3773 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3775 /* void *tmp = __builtin_alloca */
3776 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3777 stmt = gimple_build_call (atmp, 1, x);
3778 tmp = create_tmp_var_raw (ptr_type_node);
3779 gimple_add_tmp_var (tmp);
3780 gimple_call_set_lhs (stmt, tmp);
3782 gimple_seq_add_stmt (ilist, stmt);
3784 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3785 gimplify_assign (ptr, x, ilist);
3788 else if (is_reference (var))
3790 /* For references that are being privatized for Fortran,
3791 allocate new backing storage for the new pointer
3792 variable. This allows us to avoid changing all the
3793 code that expects a pointer to something that expects
3794 a direct variable. */
3795 if (pass == 0)
3796 continue;
3798 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3799 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3801 x = build_receiver_ref (var, false, ctx);
3802 x = build_fold_addr_expr_loc (clause_loc, x);
3804 else if (TREE_CONSTANT (x))
3806 /* For a reduction in a SIMD loop, defer adding the
3807 initialization of the reference, because if we decide
3808 to use a SIMD array for it, the initialization could
3809 cause an expansion ICE. */
3810 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3811 x = NULL_TREE;
3812 else
3814 const char *name = NULL;
3815 if (DECL_NAME (var))
3816 name = IDENTIFIER_POINTER (DECL_NAME (var));
3818 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3819 name);
3820 gimple_add_tmp_var (x);
3821 TREE_ADDRESSABLE (x) = 1;
3822 x = build_fold_addr_expr_loc (clause_loc, x);
3825 else
3827 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3828 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3831 if (x)
3833 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3834 gimplify_assign (new_var, x, ilist);
3837 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3839 else if (c_kind == OMP_CLAUSE_REDUCTION
3840 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3842 if (pass == 0)
3843 continue;
3845 else if (pass != 0)
3846 continue;
3848 switch (OMP_CLAUSE_CODE (c))
3850 case OMP_CLAUSE_SHARED:
3851 /* Ignore shared directives in teams construct. */
3852 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3853 continue;
3854 /* Shared global vars are just accessed directly. */
3855 if (is_global_var (new_var))
3856 break;
3857 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3858 needs to be delayed until after fixup_child_record_type so
3859 that we get the correct type during the dereference. */
3860 by_ref = use_pointer_for_field (var, ctx);
3861 x = build_receiver_ref (var, by_ref, ctx);
3862 SET_DECL_VALUE_EXPR (new_var, x);
3863 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3865 /* ??? If VAR is not passed by reference, and the variable
3866 hasn't been initialized yet, then we'll get a warning for
3867 the store into the omp_data_s structure. Ideally, we'd be
3868 able to notice this and not store anything at all, but
3869 we're generating code too early. Suppress the warning. */
3870 if (!by_ref)
3871 TREE_NO_WARNING (var) = 1;
3872 break;
3874 case OMP_CLAUSE_LASTPRIVATE:
3875 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3876 break;
3877 /* FALLTHRU */
3879 case OMP_CLAUSE_PRIVATE:
3880 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3881 x = build_outer_var_ref (var, ctx);
3882 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3884 if (is_task_ctx (ctx))
3885 x = build_receiver_ref (var, false, ctx);
3886 else
3887 x = build_outer_var_ref (var, ctx);
3889 else
3890 x = NULL;
3891 do_private:
3892 tree nx;
3893 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3894 if (is_simd)
3896 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3897 if ((TREE_ADDRESSABLE (new_var) || nx || y
3898 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3899 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3900 idx, lane, ivar, lvar))
3902 if (nx)
3903 x = lang_hooks.decls.omp_clause_default_ctor
3904 (c, unshare_expr (ivar), x);
3905 if (nx && x)
3906 gimplify_and_add (x, &llist[0]);
3907 if (y)
3909 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3910 if (y)
3912 gimple_seq tseq = NULL;
3914 dtor = y;
3915 gimplify_stmt (&dtor, &tseq);
3916 gimple_seq_add_seq (&llist[1], tseq);
3919 break;
3922 if (nx)
3923 gimplify_and_add (nx, ilist);
3924 /* FALLTHRU */
3926 do_dtor:
3927 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3928 if (x)
3930 gimple_seq tseq = NULL;
3932 dtor = x;
3933 gimplify_stmt (&dtor, &tseq);
3934 gimple_seq_add_seq (dlist, tseq);
3936 break;
3938 case OMP_CLAUSE_LINEAR:
3939 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3940 goto do_firstprivate;
3941 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3942 x = NULL;
3943 else
3944 x = build_outer_var_ref (var, ctx);
3945 goto do_private;
3947 case OMP_CLAUSE_FIRSTPRIVATE:
3948 if (is_task_ctx (ctx))
3950 if (is_reference (var) || is_variable_sized (var))
3951 goto do_dtor;
3952 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3953 ctx))
3954 || use_pointer_for_field (var, NULL))
3956 x = build_receiver_ref (var, false, ctx);
3957 SET_DECL_VALUE_EXPR (new_var, x);
3958 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3959 goto do_dtor;
3962 do_firstprivate:
3963 x = build_outer_var_ref (var, ctx);
3964 if (is_simd)
3966 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3967 && gimple_omp_for_combined_into_p (ctx->stmt))
3969 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3970 tree stept = TREE_TYPE (t);
3971 tree ct = find_omp_clause (clauses,
3972 OMP_CLAUSE__LOOPTEMP_);
3973 gcc_assert (ct);
3974 tree l = OMP_CLAUSE_DECL (ct);
3975 tree n1 = fd->loop.n1;
3976 tree step = fd->loop.step;
3977 tree itype = TREE_TYPE (l);
3978 if (POINTER_TYPE_P (itype))
3979 itype = signed_type_for (itype);
3980 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3981 if (TYPE_UNSIGNED (itype)
3982 && fd->loop.cond_code == GT_EXPR)
3983 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3984 fold_build1 (NEGATE_EXPR, itype, l),
3985 fold_build1 (NEGATE_EXPR,
3986 itype, step));
3987 else
3988 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3989 t = fold_build2 (MULT_EXPR, stept,
3990 fold_convert (stept, l), t);
3992 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3994 x = lang_hooks.decls.omp_clause_linear_ctor
3995 (c, new_var, x, t);
3996 gimplify_and_add (x, ilist);
3997 goto do_dtor;
4000 if (POINTER_TYPE_P (TREE_TYPE (x)))
4001 x = fold_build2 (POINTER_PLUS_EXPR,
4002 TREE_TYPE (x), x, t);
4003 else
4004 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
4007 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
4008 || TREE_ADDRESSABLE (new_var))
4009 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
4010 idx, lane, ivar, lvar))
4012 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
4014 tree iv = create_tmp_var (TREE_TYPE (new_var));
4015 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
4016 gimplify_and_add (x, ilist);
4017 gimple_stmt_iterator gsi
4018 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
4019 gassign *g
4020 = gimple_build_assign (unshare_expr (lvar), iv);
4021 gsi_insert_before_without_update (&gsi, g,
4022 GSI_SAME_STMT);
4023 tree t = OMP_CLAUSE_LINEAR_STEP (c);
4024 enum tree_code code = PLUS_EXPR;
4025 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
4026 code = POINTER_PLUS_EXPR;
4027 g = gimple_build_assign (iv, code, iv, t);
4028 gsi_insert_before_without_update (&gsi, g,
4029 GSI_SAME_STMT);
4030 break;
4032 x = lang_hooks.decls.omp_clause_copy_ctor
4033 (c, unshare_expr (ivar), x);
4034 gimplify_and_add (x, &llist[0]);
4035 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
4036 if (x)
4038 gimple_seq tseq = NULL;
4040 dtor = x;
4041 gimplify_stmt (&dtor, &tseq);
4042 gimple_seq_add_seq (&llist[1], tseq);
4044 break;
4047 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
4048 gimplify_and_add (x, ilist);
4049 goto do_dtor;
4051 case OMP_CLAUSE__LOOPTEMP_:
4052 gcc_assert (is_parallel_ctx (ctx));
4053 x = build_outer_var_ref (var, ctx);
4054 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
4055 gimplify_and_add (x, ilist);
4056 break;
4058 case OMP_CLAUSE_COPYIN:
4059 by_ref = use_pointer_for_field (var, NULL);
4060 x = build_receiver_ref (var, by_ref, ctx);
4061 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
4062 append_to_statement_list (x, &copyin_seq);
4063 copyin_by_ref |= by_ref;
4064 break;
4066 case OMP_CLAUSE_REDUCTION:
4067 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4069 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4070 gimple tseq;
4071 x = build_outer_var_ref (var, ctx);
4073 if (is_reference (var)
4074 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4075 TREE_TYPE (x)))
4076 x = build_fold_addr_expr_loc (clause_loc, x);
4077 SET_DECL_VALUE_EXPR (placeholder, x);
4078 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4079 tree new_vard = new_var;
4080 if (is_reference (var))
4082 gcc_assert (TREE_CODE (new_var) == MEM_REF);
4083 new_vard = TREE_OPERAND (new_var, 0);
4084 gcc_assert (DECL_P (new_vard));
4086 if (is_simd
4087 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
4088 idx, lane, ivar, lvar))
4090 if (new_vard == new_var)
4092 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
4093 SET_DECL_VALUE_EXPR (new_var, ivar);
4095 else
4097 SET_DECL_VALUE_EXPR (new_vard,
4098 build_fold_addr_expr (ivar));
4099 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
4101 x = lang_hooks.decls.omp_clause_default_ctor
4102 (c, unshare_expr (ivar),
4103 build_outer_var_ref (var, ctx));
4104 if (x)
4105 gimplify_and_add (x, &llist[0]);
4106 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
4108 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
4109 lower_omp (&tseq, ctx);
4110 gimple_seq_add_seq (&llist[0], tseq);
4112 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
4113 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
4114 lower_omp (&tseq, ctx);
4115 gimple_seq_add_seq (&llist[1], tseq);
4116 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4117 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
4118 if (new_vard == new_var)
4119 SET_DECL_VALUE_EXPR (new_var, lvar);
4120 else
4121 SET_DECL_VALUE_EXPR (new_vard,
4122 build_fold_addr_expr (lvar));
4123 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
4124 if (x)
4126 tseq = NULL;
4127 dtor = x;
4128 gimplify_stmt (&dtor, &tseq);
4129 gimple_seq_add_seq (&llist[1], tseq);
4131 break;
4133 /* If this is a reference to a constant-size reduction var
4134 with a placeholder, we haven't emitted the initializer
4135 for it because it is undesirable if SIMD arrays are used.
4136 But if they aren't used, we need to emit the deferred
4137 initialization now. */
4138 else if (is_reference (var) && is_simd)
4139 handle_simd_reference (clause_loc, new_vard, ilist);
4140 x = lang_hooks.decls.omp_clause_default_ctor
4141 (c, unshare_expr (new_var),
4142 build_outer_var_ref (var, ctx));
4143 if (x)
4144 gimplify_and_add (x, ilist);
4145 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
4147 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
4148 lower_omp (&tseq, ctx);
4149 gimple_seq_add_seq (ilist, tseq);
4151 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
4152 if (is_simd)
4154 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
4155 lower_omp (&tseq, ctx);
4156 gimple_seq_add_seq (dlist, tseq);
4157 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4159 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
4160 goto do_dtor;
4162 else
4164 x = omp_reduction_init (c, TREE_TYPE (new_var));
4165 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
4166 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
4168 /* reduction(-:var) sums up the partial results, so it
4169 acts identically to reduction(+:var). */
4170 if (code == MINUS_EXPR)
4171 code = PLUS_EXPR;
4173 tree new_vard = new_var;
4174 if (is_simd && is_reference (var))
4176 gcc_assert (TREE_CODE (new_var) == MEM_REF);
4177 new_vard = TREE_OPERAND (new_var, 0);
4178 gcc_assert (DECL_P (new_vard));
4180 if (is_simd
4181 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
4182 idx, lane, ivar, lvar))
4184 tree ref = build_outer_var_ref (var, ctx);
4186 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
4188 x = build2 (code, TREE_TYPE (ref), ref, ivar);
4189 ref = build_outer_var_ref (var, ctx);
4190 gimplify_assign (ref, x, &llist[1]);
4192 if (new_vard != new_var)
4194 SET_DECL_VALUE_EXPR (new_vard,
4195 build_fold_addr_expr (lvar));
4196 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
4199 else
4201 if (is_reference (var) && is_simd)
4202 handle_simd_reference (clause_loc, new_vard, ilist);
4203 gimplify_assign (new_var, x, ilist);
4204 if (is_simd)
4206 tree ref = build_outer_var_ref (var, ctx);
4208 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4209 ref = build_outer_var_ref (var, ctx);
4210 gimplify_assign (ref, x, dlist);
4214 break;
4216 default:
4217 gcc_unreachable ();
4222 if (lane)
4224 tree uid = create_tmp_var (ptr_type_node, "simduid");
4225 /* Don't warn about simduid being uninitialized; it always is,
4226 since we use it only for its DECL_UID, never for its value. */
4227 TREE_NO_WARNING (uid) = 1;
4228 gimple g
4229 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
4230 gimple_call_set_lhs (g, lane);
4231 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
4232 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
4233 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
4234 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
4235 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
4236 gimple_omp_for_set_clauses (ctx->stmt, c);
4237 g = gimple_build_assign (lane, INTEGER_CST,
4238 build_int_cst (unsigned_type_node, 0));
4239 gimple_seq_add_stmt (ilist, g);
4240 for (int i = 0; i < 2; i++)
4241 if (llist[i])
4243 tree vf = create_tmp_var (unsigned_type_node);
4244 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
4245 gimple_call_set_lhs (g, vf);
4246 gimple_seq *seq = i == 0 ? ilist : dlist;
4247 gimple_seq_add_stmt (seq, g);
4248 tree t = build_int_cst (unsigned_type_node, 0);
4249 g = gimple_build_assign (idx, INTEGER_CST, t);
4250 gimple_seq_add_stmt (seq, g);
4251 tree body = create_artificial_label (UNKNOWN_LOCATION);
4252 tree header = create_artificial_label (UNKNOWN_LOCATION);
4253 tree end = create_artificial_label (UNKNOWN_LOCATION);
4254 gimple_seq_add_stmt (seq, gimple_build_goto (header));
4255 gimple_seq_add_stmt (seq, gimple_build_label (body));
4256 gimple_seq_add_seq (seq, llist[i]);
4257 t = build_int_cst (unsigned_type_node, 1);
4258 g = gimple_build_assign (idx, PLUS_EXPR, idx, t);
4259 gimple_seq_add_stmt (seq, g);
4260 gimple_seq_add_stmt (seq, gimple_build_label (header));
4261 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
4262 gimple_seq_add_stmt (seq, g);
4263 gimple_seq_add_stmt (seq, gimple_build_label (end));
4267 /* The copyin sequence is not to be executed by the main thread, since
4268 that would result in self-copies. That is perhaps harmless for scalars,
4269 but it certainly is not for C++ operator=. */
4270 if (copyin_seq)
4272 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
4273 0);
4274 x = build2 (NE_EXPR, boolean_type_node, x,
4275 build_int_cst (TREE_TYPE (x), 0));
4276 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
4277 gimplify_and_add (x, ilist);
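/* Shape of the guard just built (a sketch, not verbatim GIMPLE):

if (__builtin_omp_get_thread_num () != 0)
  copyin_seq;   // only non-master threads copy the master's values

the master thread skips it to avoid the self-copies described above. */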
4280 /* If any copyin variable is passed by reference, we must ensure the
4281 master thread doesn't modify it before it is copied over in all
4282 threads. Similarly for variables in both firstprivate and
4283 lastprivate clauses we need to ensure the lastprivate copying
4284 happens after firstprivate copying in all threads. And similarly
4285 for UDRs if initializer expression refers to omp_orig. */
4286 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
4288 /* Don't add any barrier for #pragma omp simd or
4289 #pragma omp distribute. */
4290 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
4291 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
4292 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
4295 /* If max_vf is non-zero, then we can use only a vectorization factor
4296 up to the max_vf we chose. So stick it into the safelen clause. */
4297 if (max_vf)
4299 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
4300 OMP_CLAUSE_SAFELEN);
4301 if (c == NULL_TREE
4302 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
4303 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
4304 max_vf) == 1))
4306 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
4307 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
4308 max_vf);
4309 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
4310 gimple_omp_for_set_clauses (ctx->stmt, c);
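/* Net effect (illustrative): the loop is treated as if it carried

#pragma omp simd safelen(max_vf)

so later vectorization cannot exceed the SIMD array size chosen here. */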
4316 /* Generate code to implement the LASTPRIVATE clauses. This is used for
4317 both parallel and workshare constructs. PREDICATE may be NULL if it's
4318 always true. */
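/* For reference, a sketch of the semantics being lowered:

#pragma omp for lastprivate(x)
for (i = 0; i < n; i++)
  x = f (i);
// afterwards x holds the value from the sequentially last iteration

PREDICATE, when non-NULL, is the "is this the last iteration" test. */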
4320 static void
4321 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
4322 omp_context *ctx)
4324 tree x, c, label = NULL, orig_clauses = clauses;
4325 bool par_clauses = false;
4326 tree simduid = NULL, lastlane = NULL;
4328 /* Early exit if there are no lastprivate or linear clauses. */
4329 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
4330 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
4331 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
4332 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
4333 break;
4334 if (clauses == NULL)
4336 /* If this was a workshare clause, see if it had been combined
4337 with its parallel. In that case, look for the clauses on the
4338 parallel statement itself. */
4339 if (is_parallel_ctx (ctx))
4340 return;
4342 ctx = ctx->outer;
4343 if (ctx == NULL || !is_parallel_ctx (ctx))
4344 return;
4346 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4347 OMP_CLAUSE_LASTPRIVATE);
4348 if (clauses == NULL)
4349 return;
4350 par_clauses = true;
4353 if (predicate)
4355 gcond *stmt;
4356 tree label_true, arm1, arm2;
4358 label = create_artificial_label (UNKNOWN_LOCATION);
4359 label_true = create_artificial_label (UNKNOWN_LOCATION);
4360 arm1 = TREE_OPERAND (predicate, 0);
4361 arm2 = TREE_OPERAND (predicate, 1);
4362 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
4363 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
4364 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
4365 label_true, label);
4366 gimple_seq_add_stmt (stmt_list, stmt);
4367 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
4370 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4371 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4373 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
4374 if (simduid)
4375 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
4378 for (c = clauses; c ;)
4380 tree var, new_var;
4381 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4383 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4384 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4385 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
4387 var = OMP_CLAUSE_DECL (c);
4388 new_var = lookup_decl (var, ctx);
4390 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
4392 tree val = DECL_VALUE_EXPR (new_var);
4393 if (TREE_CODE (val) == ARRAY_REF
4394 && VAR_P (TREE_OPERAND (val, 0))
4395 && lookup_attribute ("omp simd array",
4396 DECL_ATTRIBUTES (TREE_OPERAND (val,
4397 0))))
4399 if (lastlane == NULL)
4401 lastlane = create_tmp_var (unsigned_type_node);
4402 gcall *g
4403 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
4404 2, simduid,
4405 TREE_OPERAND (val, 1));
4406 gimple_call_set_lhs (g, lastlane);
4407 gimple_seq_add_stmt (stmt_list, g);
4409 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
4410 TREE_OPERAND (val, 0), lastlane,
4411 NULL_TREE, NULL_TREE);
4415 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4416 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
4418 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
4419 gimple_seq_add_seq (stmt_list,
4420 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
4421 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
4423 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4424 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
4426 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
4427 gimple_seq_add_seq (stmt_list,
4428 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
4429 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
4432 x = build_outer_var_ref (var, ctx);
4433 if (is_reference (var))
4434 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4435 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
4436 gimplify_and_add (x, stmt_list);
4438 c = OMP_CLAUSE_CHAIN (c);
4439 if (c == NULL && !par_clauses)
4441 /* If this was a workshare clause, see if it had been combined
4442 with its parallel. In that case, continue looking for the
4443 clauses also on the parallel statement itself. */
4444 if (is_parallel_ctx (ctx))
4445 break;
4447 ctx = ctx->outer;
4448 if (ctx == NULL || !is_parallel_ctx (ctx))
4449 break;
4451 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4452 OMP_CLAUSE_LASTPRIVATE);
4453 par_clauses = true;
4457 if (label)
4458 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
4461 static void
4462 oacc_lower_reduction_var_helper (gimple_seq *stmt_seqp, omp_context *ctx,
4463 tree tid, tree var, tree new_var)
4465 /* The atomic add at the end of the sum creates unnecessary
4466 write contention on accelerators. To work around this,
4467 create an array to store the partial reductions. Later, in
4468 lower_omp_for (for OpenACC), the values of the array will be
4469 combined. */
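/* Sketch of the stores emitted below (field and variable names are
illustrative only):

array = receiver ref to the reduction array;   // one slot per thread
array[tid] = local_partial_sum;

the per-thread slots are combined later in lower_omp_for. */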
4471 tree t = NULL_TREE, array, x;
4472 tree type = get_base_type (var);
4473 gimple stmt;
4475 /* Now insert the partial reductions into the array. */
4477 /* Find the reduction array. */
4479 tree ptype = build_pointer_type (type);
4481 t = lookup_oacc_reduction (oacc_get_reduction_array_id (var), ctx);
4482 t = build_receiver_ref (t, false, ctx->outer);
4484 array = create_tmp_var (ptype);
4485 gimplify_assign (array, t, stmt_seqp);
4487 tree ptr = create_tmp_var (TREE_TYPE (array));
4491 /* Compute this thread's byte offset into the array: sizeof (type) * tid. */
4492 tree offset = create_tmp_var (sizetype);
4493 gimplify_assign (offset, TYPE_SIZE_UNIT (type),
4494 stmt_seqp);
4495 t = create_tmp_var (sizetype);
4496 gimplify_assign (t, unshare_expr (fold_build1 (NOP_EXPR, sizetype, tid)),
4497 stmt_seqp);
4498 stmt = gimple_build_assign (offset, MULT_EXPR, offset, t);
4499 gimple_seq_add_stmt (stmt_seqp, stmt);
4501 /* Offset expression. Does the POINTER_PLUS_EXPR take care
4502 of adding sizeof(var) to the array? */
4503 ptr = create_tmp_var (ptype);
4504 stmt = gimple_build_assign (unshare_expr (ptr), POINTER_PLUS_EXPR, array,
4505 offset);
4506 gimple_seq_add_stmt (stmt_seqp, stmt);
4508 /* Move the local sum to gfc$sum[i]. */
4509 x = unshare_expr (build_simple_mem_ref (ptr));
4510 stmt = gimplify_assign (x, new_var, stmt_seqp);
4513 /* Generate code to implement the REDUCTION clauses. */
4515 static void
4516 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
4518 gimple_seq sub_seq = NULL;
4519 gimple stmt;
4520 tree x, c, tid = NULL_TREE;
4521 int count = 0;
4523 /* SIMD reductions are handled in lower_rec_input_clauses. */
4524 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4525 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4526 return;
4528 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4529 update in that case, otherwise use a lock. */
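/* Roughly, as an illustrative sketch: a lone reduction(+:s) becomes an
atomic update of the outer variable,

#pragma omp atomic
s_outer += s_private;

while several clauses (or UDRs/arrays) are merged inside a
GOMP_atomic_start () / GOMP_atomic_end () critical region instead. */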
4530 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
4531 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
4533 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4535 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4536 count = -1;
4537 break;
4539 count++;
4542 if (count == 0)
4543 return;
4545 /* Initialize thread info for OpenACC. */
4546 if (is_gimple_omp_oacc (ctx->stmt))
4548 /* Get the current thread id. */
4549 tree call = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM);
4550 tid = create_tmp_var (TREE_TYPE (TREE_TYPE (call)));
4551 gimple stmt = gimple_build_call (call, 0);
4552 gimple_call_set_lhs (stmt, tid);
4553 gimple_seq_add_stmt (stmt_seqp, stmt);
4556 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4558 tree var, ref, new_var;
4559 enum tree_code code;
4560 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4562 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
4563 continue;
4565 var = OMP_CLAUSE_DECL (c);
4566 new_var = lookup_decl (var, ctx);
4567 if (is_reference (var))
4568 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4569 ref = build_outer_var_ref (var, ctx);
4570 code = OMP_CLAUSE_REDUCTION_CODE (c);
4572 /* reduction(-:var) sums up the partial results, so it acts
4573 identically to reduction(+:var). */
4574 if (code == MINUS_EXPR)
4575 code = PLUS_EXPR;
4577 if (is_gimple_omp_oacc (ctx->stmt))
4579 gcc_checking_assert (!OMP_CLAUSE_REDUCTION_PLACEHOLDER (c));
4581 oacc_lower_reduction_var_helper (stmt_seqp, ctx, tid, var, new_var);
4583 else if (count == 1)
4585 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
4587 addr = save_expr (addr);
4588 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
4589 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
4590 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
4591 gimplify_and_add (x, stmt_seqp);
4592 return;
4594 else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4596 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4598 if (is_reference (var)
4599 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4600 TREE_TYPE (ref)))
4601 ref = build_fold_addr_expr_loc (clause_loc, ref);
4602 SET_DECL_VALUE_EXPR (placeholder, ref);
4603 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4604 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
4605 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
4606 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4607 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
4609 else
4611 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4612 ref = build_outer_var_ref (var, ctx);
4613 gimplify_assign (ref, x, &sub_seq);
4617 if (is_gimple_omp_oacc (ctx->stmt))
4618 return;
4620 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4621 0);
4622 gimple_seq_add_stmt (stmt_seqp, stmt);
4624 gimple_seq_add_seq (stmt_seqp, sub_seq);
4626 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4627 0);
4628 gimple_seq_add_stmt (stmt_seqp, stmt);
4632 /* Generate code to implement the COPYPRIVATE clauses. */
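/* A simplified sketch of the protocol (conceptual field names): the
thread that executed the single region broadcasts each variable,

.omp_copy.x = x;     // executing thread, emitted into SLIST
x = .omp_copy.x;     // all other threads, emitted into RLIST

with an address used instead of the value when passed by reference. */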
4634 static void
4635 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4636 omp_context *ctx)
4638 tree c;
4640 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4642 tree var, new_var, ref, x;
4643 bool by_ref;
4644 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4646 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4647 continue;
4649 var = OMP_CLAUSE_DECL (c);
4650 by_ref = use_pointer_for_field (var, NULL);
4652 ref = build_sender_ref (var, ctx);
4653 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4654 if (by_ref)
4656 x = build_fold_addr_expr_loc (clause_loc, new_var);
4657 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4659 gimplify_assign (ref, x, slist);
4661 ref = build_receiver_ref (var, false, ctx);
4662 if (by_ref)
4664 ref = fold_convert_loc (clause_loc,
4665 build_pointer_type (TREE_TYPE (new_var)),
4666 ref);
4667 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4669 if (is_reference (var))
4671 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4672 ref = build_simple_mem_ref_loc (clause_loc, ref);
4673 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4675 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4676 gimplify_and_add (x, rlist);
4681 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4682 and REDUCTION from the sender (aka parent) side. */
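/* Conceptually (illustrative field names), for firstprivate(x) the
parent stores into the marshalling struct in ILIST,

.omp_data_o.x = x;          // or &x when passed by reference

and copies results back out in OLIST for lastprivate/reduction:

x = .omp_data_o.x;                                                 */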
4684 static void
4685 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4686 omp_context *ctx)
4688 tree c;
4690 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4692 tree val, ref, x, var;
4693 bool by_ref, do_in = false, do_out = false;
4694 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4696 switch (OMP_CLAUSE_CODE (c))
4698 case OMP_CLAUSE_PRIVATE:
4699 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4700 break;
4701 continue;
4702 case OMP_CLAUSE_FIRSTPRIVATE:
4703 case OMP_CLAUSE_COPYIN:
4704 case OMP_CLAUSE_LASTPRIVATE:
4705 case OMP_CLAUSE_REDUCTION:
4706 case OMP_CLAUSE__LOOPTEMP_:
4707 break;
4708 default:
4709 continue;
4712 val = OMP_CLAUSE_DECL (c);
4713 var = lookup_decl_in_outer_ctx (val, ctx);
4715 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4716 && is_global_var (var))
4717 continue;
4718 if (is_variable_sized (val))
4719 continue;
4720 by_ref = use_pointer_for_field (val, NULL);
4722 switch (OMP_CLAUSE_CODE (c))
4724 case OMP_CLAUSE_PRIVATE:
4725 case OMP_CLAUSE_FIRSTPRIVATE:
4726 case OMP_CLAUSE_COPYIN:
4727 case OMP_CLAUSE__LOOPTEMP_:
4728 do_in = true;
4729 break;
4731 case OMP_CLAUSE_LASTPRIVATE:
4732 if (by_ref || is_reference (val))
4734 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4735 continue;
4736 do_in = true;
4738 else
4740 do_out = true;
4741 if (lang_hooks.decls.omp_private_outer_ref (val))
4742 do_in = true;
4744 break;
4746 case OMP_CLAUSE_REDUCTION:
4747 do_in = true;
4748 do_out = !(by_ref || is_reference (val));
4749 break;
4751 default:
4752 gcc_unreachable ();
4755 if (do_in)
4757 ref = build_sender_ref (val, ctx);
4758 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4759 gimplify_assign (ref, x, ilist);
4760 if (is_task_ctx (ctx))
4761 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4764 if (do_out)
4766 ref = build_sender_ref (val, ctx);
4767 gimplify_assign (var, ref, olist);
4772 /* Generate code to implement SHARED from the sender (aka parent)
4773 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4774 list things that got automatically shared. */
4776 static void
4777 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4779 tree var, ovar, nvar, f, x, record_type;
4781 if (ctx->record_type == NULL)
4782 return;
4784 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4785 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4787 ovar = DECL_ABSTRACT_ORIGIN (f);
4788 nvar = maybe_lookup_decl (ovar, ctx);
4789 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4790 continue;
4792 /* If CTX is a nested parallel directive, find the immediately
4793 enclosing parallel or workshare construct that contains a
4794 mapping for OVAR. */
4795 var = lookup_decl_in_outer_ctx (ovar, ctx);
4797 if (use_pointer_for_field (ovar, ctx))
4799 x = build_sender_ref (ovar, ctx);
4800 var = build_fold_addr_expr (var);
4801 gimplify_assign (x, var, ilist);
4803 else
4805 x = build_sender_ref (ovar, ctx);
4806 gimplify_assign (x, var, ilist);
4808 if (!TREE_READONLY (var)
4809 /* We don't need to receive a new reference to a result
4810 or parm decl. In fact we may not store to it as we will
4811 invalidate any pending RSO and generate wrong gimple
4812 during inlining. */
4813 && !((TREE_CODE (var) == RESULT_DECL
4814 || TREE_CODE (var) == PARM_DECL)
4815 && DECL_BY_REFERENCE (var)))
4817 x = build_sender_ref (ovar, ctx);
4818 gimplify_assign (var, x, olist);
4825 /* A convenience function to build an empty GIMPLE_COND with just the
4826 condition. */
4828 static gcond *
4829 gimple_build_cond_empty (tree cond)
4831 enum tree_code pred_code;
4832 tree lhs, rhs;
4834 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4835 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4839 /* Build the function calls to GOMP_parallel_start etc to actually
4840 generate the parallel operation. REGION is the parallel region
4841 being expanded. BB is the block where the code is inserted. WS_ARGS
4842 will be set if this is a call to a combined parallel+workshare
4843 construct; it contains the list of additional arguments needed by
4844 the workshare construct. */
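/* The launch emitted here has the shape (a sketch of the libgomp call):

GOMP_parallel (child_fn, &.omp_data_o, num_threads, flags);

or a GOMP_parallel_loop_* / GOMP_parallel_sections variant when the
region is a combined parallel+workshare construct, as chosen below. */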
4846 static void
4847 expand_parallel_call (struct omp_region *region, basic_block bb,
4848 gomp_parallel *entry_stmt,
4849 vec<tree, va_gc> *ws_args)
4851 tree t, t1, t2, val, cond, c, clauses, flags;
4852 gimple_stmt_iterator gsi;
4853 gimple stmt;
4854 enum built_in_function start_ix;
4855 int start_ix2;
4856 location_t clause_loc;
4857 vec<tree, va_gc> *args;
4859 clauses = gimple_omp_parallel_clauses (entry_stmt);
4861 /* Determine what flavor of GOMP_parallel we will be
4862 emitting. */
4863 start_ix = BUILT_IN_GOMP_PARALLEL;
4864 if (is_combined_parallel (region))
4866 switch (region->inner->type)
4868 case GIMPLE_OMP_FOR:
4869 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4870 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4871 + (region->inner->sched_kind
4872 == OMP_CLAUSE_SCHEDULE_RUNTIME
4873 ? 3 : region->inner->sched_kind));
4874 start_ix = (enum built_in_function)start_ix2;
4875 break;
4876 case GIMPLE_OMP_SECTIONS:
4877 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4878 break;
4879 default:
4880 gcc_unreachable ();
4884 /* By default, the value of NUM_THREADS is zero (selected at run time)
4885 and there is no conditional. */
4886 cond = NULL_TREE;
4887 val = build_int_cst (unsigned_type_node, 0);
4888 flags = build_int_cst (unsigned_type_node, 0);
4890 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4891 if (c)
4892 cond = OMP_CLAUSE_IF_EXPR (c);
4894 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4895 if (c)
4897 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4898 clause_loc = OMP_CLAUSE_LOCATION (c);
4900 else
4901 clause_loc = gimple_location (entry_stmt);
4903 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4904 if (c)
4905 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4907 /* Ensure 'val' is of the correct type. */
4908 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4910 /* If we found the clause 'if (cond)', build either
4911 (cond != 0) or (cond ? val : 1u). */
4912 if (cond)
4914 cond = gimple_boolify (cond);
4916 if (integer_zerop (val))
4917 val = fold_build2_loc (clause_loc,
4918 EQ_EXPR, unsigned_type_node, cond,
4919 build_int_cst (TREE_TYPE (cond), 0));
4920 else
4922 basic_block cond_bb, then_bb, else_bb;
4923 edge e, e_then, e_else;
4924 tree tmp_then, tmp_else, tmp_join, tmp_var;
4926 tmp_var = create_tmp_var (TREE_TYPE (val));
4927 if (gimple_in_ssa_p (cfun))
4929 tmp_then = make_ssa_name (tmp_var);
4930 tmp_else = make_ssa_name (tmp_var);
4931 tmp_join = make_ssa_name (tmp_var);
4933 else
4935 tmp_then = tmp_var;
4936 tmp_else = tmp_var;
4937 tmp_join = tmp_var;
4940 e = split_block_after_labels (bb);
4941 cond_bb = e->src;
4942 bb = e->dest;
4943 remove_edge (e);
4945 then_bb = create_empty_bb (cond_bb);
4946 else_bb = create_empty_bb (then_bb);
4947 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4948 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4950 stmt = gimple_build_cond_empty (cond);
4951 gsi = gsi_start_bb (cond_bb);
4952 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4954 gsi = gsi_start_bb (then_bb);
4955 stmt = gimple_build_assign (tmp_then, val);
4956 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4958 gsi = gsi_start_bb (else_bb);
4959 stmt = gimple_build_assign
4960 (tmp_else, build_int_cst (unsigned_type_node, 1));
4961 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4963 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4964 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4965 add_bb_to_loop (then_bb, cond_bb->loop_father);
4966 add_bb_to_loop (else_bb, cond_bb->loop_father);
4967 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4968 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4970 if (gimple_in_ssa_p (cfun))
4972 gphi *phi = create_phi_node (tmp_join, bb);
4973 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4974 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4977 val = tmp_join;
4980 gsi = gsi_start_bb (bb);
4981 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4982 false, GSI_CONTINUE_LINKING);
4985 gsi = gsi_last_bb (bb);
4986 t = gimple_omp_parallel_data_arg (entry_stmt);
4987 if (t == NULL)
4988 t1 = null_pointer_node;
4989 else
4990 t1 = build_fold_addr_expr (t);
4991 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4993 vec_alloc (args, 4 + vec_safe_length (ws_args));
4994 args->quick_push (t2);
4995 args->quick_push (t1);
4996 args->quick_push (val);
4997 if (ws_args)
4998 args->splice (*ws_args);
4999 args->quick_push (flags);
5001 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
5002 builtin_decl_explicit (start_ix), args);
5004 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5005 false, GSI_CONTINUE_LINKING);
5008 /* Insert a call to the function named FUNC_NAME, built from the information
5009 in ENTRY_STMT, into the basic_block BB. */
5011 static void
5012 expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
5013 vec <tree, va_gc> *ws_args)
5015 tree t, t1, t2;
5016 gimple_stmt_iterator gsi;
5017 vec <tree, va_gc> *args;
5019 gcc_assert (vec_safe_length (ws_args) == 2);
5020 tree func_name = (*ws_args)[0];
5021 tree grain = (*ws_args)[1];
5023 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
5024 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
5025 gcc_assert (count != NULL_TREE);
5026 count = OMP_CLAUSE_OPERAND (count, 0);
5028 gsi = gsi_last_bb (bb);
5029 t = gimple_omp_parallel_data_arg (entry_stmt);
5030 if (t == NULL)
5031 t1 = null_pointer_node;
5032 else
5033 t1 = build_fold_addr_expr (t);
5034 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
5036 vec_alloc (args, 4);
5037 args->quick_push (t2);
5038 args->quick_push (t1);
5039 args->quick_push (count);
5040 args->quick_push (grain);
5041 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
5043 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
5044 GSI_CONTINUE_LINKING);
5047 /* Build the function call to GOMP_task to actually
5048 generate the task operation. BB is the block where the code is inserted. */
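/* The call built below follows this shape (sketch of the arguments
assembled here):

GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
           if_cond, flags, depend);

where flags packs untied (1), final (2), mergeable (4) and depend (8). */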
5050 static void
5051 expand_task_call (basic_block bb, gomp_task *entry_stmt)
5053 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
5054 gimple_stmt_iterator gsi;
5055 location_t loc = gimple_location (entry_stmt);
5057 clauses = gimple_omp_task_clauses (entry_stmt);
5059 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
5060 if (c)
5061 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
5062 else
5063 cond = boolean_true_node;
5065 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
5066 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
5067 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
5068 flags = build_int_cst (unsigned_type_node,
5069 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
5071 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
5072 if (c)
5074 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
5075 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
5076 build_int_cst (unsigned_type_node, 2),
5077 build_int_cst (unsigned_type_node, 0));
5078 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
5080 if (depend)
5081 depend = OMP_CLAUSE_DECL (depend);
5082 else
5083 depend = build_int_cst (ptr_type_node, 0);
5085 gsi = gsi_last_bb (bb);
5086 t = gimple_omp_task_data_arg (entry_stmt);
5087 if (t == NULL)
5088 t2 = null_pointer_node;
5089 else
5090 t2 = build_fold_addr_expr_loc (loc, t);
5091 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
5092 t = gimple_omp_task_copy_fn (entry_stmt);
5093 if (t == NULL)
5094 t3 = null_pointer_node;
5095 else
5096 t3 = build_fold_addr_expr_loc (loc, t);
5098 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
5099 8, t1, t2, t3,
5100 gimple_omp_task_arg_size (entry_stmt),
5101 gimple_omp_task_arg_align (entry_stmt), cond, flags,
5102 depend);
5104 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5105 false, GSI_CONTINUE_LINKING);
5109 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
5110 catch handler and return it. This prevents programs from violating the
5111 structured block semantics with throws. */
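/* Sketch of the wrapper produced (GIMPLE pseudo-form, illustrative):

try
  { BODY }
catch
  { <eh_must_not_throw (terminate-style handler)> }

so an exception escaping BODY terminates rather than unwinding out
of the structured block. */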
5113 static gimple_seq
5114 maybe_catch_exception (gimple_seq body)
5116 gimple g;
5117 tree decl;
5119 if (!flag_exceptions)
5120 return body;
5122 if (lang_hooks.eh_protect_cleanup_actions != NULL)
5123 decl = lang_hooks.eh_protect_cleanup_actions ();
5124 else
5125 decl = builtin_decl_explicit (BUILT_IN_TRAP);
5127 g = gimple_build_eh_must_not_throw (decl);
5128 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
5129 GIMPLE_TRY_CATCH);
5131 return gimple_seq_alloc_with_stmt (g);
5134 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
5136 static tree
5137 vec2chain (vec<tree, va_gc> *v)
5139 tree chain = NULL_TREE, t;
5140 unsigned ix;
5142 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
5144 DECL_CHAIN (t) = chain;
5145 chain = t;
5148 return chain;
5152 /* Remove barriers in REGION->EXIT's block. Note that this is only
5153 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
5154 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
5155 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
5156 removed. */
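/* Illustrative case (a sketch):

#pragma omp parallel
{
  #pragma omp for      // implicit barrier at loop end
  ...
}                      // implicit barrier at region end

here the loop's barrier is redundant with the region's, so its
GIMPLE_OMP_RETURN may be marked nowait, unless queued tasks might
still touch addressable locals (checked below). */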
5158 static void
5159 remove_exit_barrier (struct omp_region *region)
5161 gimple_stmt_iterator gsi;
5162 basic_block exit_bb;
5163 edge_iterator ei;
5164 edge e;
5165 gimple stmt;
5166 int any_addressable_vars = -1;
5168 exit_bb = region->exit;
5170 /* If the parallel region doesn't return, we don't have REGION->EXIT
5171 block at all. */
5172 if (! exit_bb)
5173 return;
5175 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
5176 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
5177 statements that can appear in between are extremely limited -- no
5178 memory operations at all. Here, we allow nothing at all, so the
5179 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
5180 gsi = gsi_last_bb (exit_bb);
5181 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5182 gsi_prev (&gsi);
5183 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
5184 return;
5186 FOR_EACH_EDGE (e, ei, exit_bb->preds)
5188 gsi = gsi_last_bb (e->src);
5189 if (gsi_end_p (gsi))
5190 continue;
5191 stmt = gsi_stmt (gsi);
5192 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
5193 && !gimple_omp_return_nowait_p (stmt))
5195 /* OpenMP 3.0 tasks unfortunately prevent this optimization
5196 in many cases. If there could be tasks queued, the barrier
5197 might be needed to let the tasks run before some local
5198 variable of the parallel that the task uses as shared
5199 runs out of scope. The task can be spawned either
5200 from within current function (this would be easy to check)
5201 or from some function it calls and gets passed an address
5202 of such a variable. */
5203 if (any_addressable_vars < 0)
5205 gomp_parallel *parallel_stmt
5206 = as_a <gomp_parallel *> (last_stmt (region->entry));
5207 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
5208 tree local_decls, block, decl;
5209 unsigned ix;
5211 any_addressable_vars = 0;
5212 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
5213 if (TREE_ADDRESSABLE (decl))
5215 any_addressable_vars = 1;
5216 break;
5218 for (block = gimple_block (stmt);
5219 !any_addressable_vars
5220 && block
5221 && TREE_CODE (block) == BLOCK;
5222 block = BLOCK_SUPERCONTEXT (block))
5224 for (local_decls = BLOCK_VARS (block);
5225 local_decls;
5226 local_decls = DECL_CHAIN (local_decls))
5227 if (TREE_ADDRESSABLE (local_decls))
5229 any_addressable_vars = 1;
5230 break;
5232 if (block == gimple_block (parallel_stmt))
5233 break;
5236 if (!any_addressable_vars)
5237 gimple_omp_return_set_nowait (stmt);
5242 static void
5243 remove_exit_barriers (struct omp_region *region)
5245 if (region->type == GIMPLE_OMP_PARALLEL)
5246 remove_exit_barrier (region);
5248 if (region->inner)
5250 region = region->inner;
5251 remove_exit_barriers (region);
5252 while (region->next)
5254 region = region->next;
5255 remove_exit_barriers (region);
5260 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
5261 calls. These can't be declared as const functions, but
5262 within one parallel body they are constant, so they can be
5263 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
5264 which are declared const. Similarly for a task body, except
5265 that in an untied task omp_get_thread_num () can change at any task
5266 scheduling point. */
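/* E.g. (a sketch), within one parallel body

n = omp_get_num_threads ();            // external call, not const

is rewritten below to

n = __builtin_omp_get_num_threads ();  // const, so CSE can clean up

provided the declaration matches the builtin's type and arity. */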
5268 static void
5269 optimize_omp_library_calls (gimple entry_stmt)
5271 basic_block bb;
5272 gimple_stmt_iterator gsi;
5273 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
5274 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
5275 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
5276 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
5277 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
5278 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
5279 OMP_CLAUSE_UNTIED) != NULL);
5281 FOR_EACH_BB_FN (bb, cfun)
5282 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5284 gimple call = gsi_stmt (gsi);
5285 tree decl;
5287 if (is_gimple_call (call)
5288 && (decl = gimple_call_fndecl (call))
5289 && DECL_EXTERNAL (decl)
5290 && TREE_PUBLIC (decl)
5291 && DECL_INITIAL (decl) == NULL)
5293 tree built_in;
5295 if (DECL_NAME (decl) == thr_num_id)
5297 /* In #pragma omp task untied omp_get_thread_num () can change
5298 during the execution of the task region. */
5299 if (untied_task)
5300 continue;
5301 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
5303 else if (DECL_NAME (decl) == num_thr_id)
5304 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
5305 else
5306 continue;
5308 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
5309 || gimple_call_num_args (call) != 0)
5310 continue;
5312 if (flag_exceptions && !TREE_NOTHROW (decl))
5313 continue;
5315 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
5316 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
5317 TREE_TYPE (TREE_TYPE (built_in))))
5318 continue;
5320 gimple_call_set_fndecl (call, built_in);
5325 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
5326 regimplified. */
5328 static tree
5329 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
5331 tree t = *tp;
5333 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
5334 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
5335 return t;
5337 if (TREE_CODE (t) == ADDR_EXPR)
5338 recompute_tree_invariant_for_addr_expr (t);
5340 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
5341 return NULL_TREE;
5344 /* Prepend TO = FROM assignment before *GSI_P. */
5346 static void
5347 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
5349 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
5350 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
5351 true, GSI_SAME_STMT);
5352 gimple stmt = gimple_build_assign (to, from);
5353 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
5354 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
5355 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
5357 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5358 gimple_regimplify_operands (stmt, &gsi);
5362 /* Expand the OpenMP parallel or task directive starting at REGION. */
5364 static void
5365 expand_omp_taskreg (struct omp_region *region)
5367 basic_block entry_bb, exit_bb, new_bb;
5368 struct function *child_cfun;
5369 tree child_fn, block, t;
5370 gimple_stmt_iterator gsi;
5371 gimple entry_stmt, stmt;
5372 edge e;
5373 vec<tree, va_gc> *ws_args;
5375 entry_stmt = last_stmt (region->entry);
5376 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
5377 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
5379 entry_bb = region->entry;
5380 exit_bb = region->exit;
5382 bool is_cilk_for
5383 = (flag_cilkplus
5384 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
5385 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
5386 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
5388 if (is_cilk_for)
5389 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
5390 and the inner statement contains the name of the built-in function
5391 and grain. */
5392 ws_args = region->inner->ws_args;
5393 else if (is_combined_parallel (region))
5394 ws_args = region->ws_args;
5395 else
5396 ws_args = NULL;
5398 if (child_cfun->cfg)
5400 /* Due to inlining, it may happen that we have already outlined
5401 the region, in which case all we need to do is make the
5402 sub-graph unreachable and emit the parallel call. */
5403 edge entry_succ_e, exit_succ_e;
5405 entry_succ_e = single_succ_edge (entry_bb);
5407 gsi = gsi_last_bb (entry_bb);
5408 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
5409 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
5410 gsi_remove (&gsi, true);
5412 new_bb = entry_bb;
5413 if (exit_bb)
5415 exit_succ_e = single_succ_edge (exit_bb);
5416 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
5418 remove_edge_and_dominated_blocks (entry_succ_e);
5420 else
5422 unsigned srcidx, dstidx, num;
5424 /* If the parallel region needs data sent from the parent
5425 function, then the very first statement (except possible
5426 tree profile counter updates) of the parallel body
5427 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
5428 &.OMP_DATA_O is passed as an argument to the child function,
5429 we need to replace it with the argument as seen by the child
5430 function.
5432 In most cases, this will end up being the identity assignment
5433 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
5434 a function call that has been inlined, the original PARM_DECL
5435 .OMP_DATA_I may have been converted into a different local
5436 variable, in which case we need to keep the assignment. */
5437 if (gimple_omp_taskreg_data_arg (entry_stmt))
5439 basic_block entry_succ_bb = single_succ (entry_bb);
5440 tree arg, narg;
5441 gimple parcopy_stmt = NULL;
5443 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
5445 gimple stmt;
5447 gcc_assert (!gsi_end_p (gsi));
5448 stmt = gsi_stmt (gsi);
5449 if (gimple_code (stmt) != GIMPLE_ASSIGN)
5450 continue;
5452 if (gimple_num_ops (stmt) == 2)
5454 tree arg = gimple_assign_rhs1 (stmt);
5456 /* We're ignoring the subcode because we're
5457 effectively doing a STRIP_NOPS. */
5459 if (TREE_CODE (arg) == ADDR_EXPR
5460 && TREE_OPERAND (arg, 0)
5461 == gimple_omp_taskreg_data_arg (entry_stmt))
5463 parcopy_stmt = stmt;
5464 break;
5469 gcc_assert (parcopy_stmt != NULL);
5470 arg = DECL_ARGUMENTS (child_fn);
5472 if (!gimple_in_ssa_p (cfun))
5474 if (gimple_assign_lhs (parcopy_stmt) == arg)
5475 gsi_remove (&gsi, true);
5476 else
5478 /* ?? Is setting the subcode really necessary ?? */
5479 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
5480 gimple_assign_set_rhs1 (parcopy_stmt, arg);
5483 else
5485 /* If we are in ssa form, we must load the value from the default
5486 definition of the argument. That default definition should not exist yet,
5487 since the argument is never used uninitialized. */
5488 gcc_assert (ssa_default_def (cfun, arg) == NULL);
5489 narg = make_ssa_name (arg, gimple_build_nop ());
5490 set_ssa_default_def (cfun, arg, narg);
5491 /* ?? Is setting the subcode really necessary ?? */
5492 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
5493 gimple_assign_set_rhs1 (parcopy_stmt, narg);
5494 update_stmt (parcopy_stmt);
5498 /* Declare local variables needed in CHILD_CFUN. */
5499 block = DECL_INITIAL (child_fn);
5500 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
5501 /* The gimplifier could record temporaries in parallel/task block
5502 rather than in containing function's local_decls chain,
5503 which would mean cgraph missed finalizing them. Do it now. */
5504 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
5505 if (TREE_CODE (t) == VAR_DECL
5506 && TREE_STATIC (t)
5507 && !DECL_EXTERNAL (t))
5508 varpool_node::finalize_decl (t);
5509 DECL_SAVED_TREE (child_fn) = NULL;
5510 /* We'll create a CFG for child_fn, so no gimple body is needed. */
5511 gimple_set_body (child_fn, NULL);
5512 TREE_USED (block) = 1;
5514 /* Reset DECL_CONTEXT on function arguments. */
5515 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
5516 DECL_CONTEXT (t) = child_fn;
5518 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
5519 so that it can be moved to the child function. */
5520 gsi = gsi_last_bb (entry_bb);
5521 stmt = gsi_stmt (gsi);
5522 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
5523 || gimple_code (stmt) == GIMPLE_OMP_TASK));
5524 e = split_block (entry_bb, stmt);
5525 gsi_remove (&gsi, true);
5526 entry_bb = e->dest;
5527 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5529 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5530 if (exit_bb)
5532 gsi = gsi_last_bb (exit_bb);
5533 gcc_assert (!gsi_end_p (gsi)
5534 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5535 stmt = gimple_build_return (NULL);
5536 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5537 gsi_remove (&gsi, true);
5540 /* Move the parallel region into CHILD_CFUN. */
5542 if (gimple_in_ssa_p (cfun))
5544 init_tree_ssa (child_cfun);
5545 init_ssa_operands (child_cfun);
5546 child_cfun->gimple_df->in_ssa_p = true;
5547 block = NULL_TREE;
5549 else
5550 block = gimple_block (entry_stmt);
5552 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
5553 if (exit_bb)
5554 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
5555 /* When the OMP expansion process cannot guarantee an up-to-date
5556 loop tree, arrange for the child function to fix up loops. */
5557 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5558 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
5560 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5561 num = vec_safe_length (child_cfun->local_decls);
5562 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
5564 t = (*child_cfun->local_decls)[srcidx];
5565 if (DECL_CONTEXT (t) == cfun->decl)
5566 continue;
5567 if (srcidx != dstidx)
5568 (*child_cfun->local_decls)[dstidx] = t;
5569 dstidx++;
5571 if (dstidx != num)
5572 vec_safe_truncate (child_cfun->local_decls, dstidx);
5574 /* Inform the callgraph about the new function. */
5575 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
5576 cgraph_node::add_new_function (child_fn, true);
5577 cgraph_node::get (child_fn)->parallelized_function = 1;
5579 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5580 fixed in a following pass. */
5581 push_cfun (child_cfun);
5582 if (optimize)
5583 optimize_omp_library_calls (entry_stmt);
5584 cgraph_edge::rebuild_edges ();
5586 /* Some EH regions might become dead, see PR34608. If
5587 pass_cleanup_cfg isn't the first pass to happen with the
5588 new child, these dead EH edges might cause problems.
5589 Clean them up now. */
5590 if (flag_exceptions)
5592 basic_block bb;
5593 bool changed = false;
5595 FOR_EACH_BB_FN (bb, cfun)
5596 changed |= gimple_purge_dead_eh_edges (bb);
5597 if (changed)
5598 cleanup_tree_cfg ();
5600 if (gimple_in_ssa_p (cfun))
5601 update_ssa (TODO_update_ssa);
5602 pop_cfun ();
5605 /* Emit a library call to launch the children threads. */
5606 if (is_cilk_for)
5607 expand_cilk_for_call (new_bb,
5608 as_a <gomp_parallel *> (entry_stmt), ws_args);
5609 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5610 expand_parallel_call (region, new_bb,
5611 as_a <gomp_parallel *> (entry_stmt), ws_args);
5612 else
5613 expand_task_call (new_bb, as_a <gomp_task *> (entry_stmt));
5614 if (gimple_in_ssa_p (cfun))
5615 update_ssa (TODO_update_ssa_only_virtuals);
5619 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5620 of the combined collapse > 1 loop constructs, generate code like:
5621 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5622 if (cond3 is <)
5623 adj = STEP3 - 1;
5624 else
5625 adj = STEP3 + 1;
5626 count3 = (adj + N32 - N31) / STEP3;
5627 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5628 if (cond2 is <)
5629 adj = STEP2 - 1;
5630 else
5631 adj = STEP2 + 1;
5632 count2 = (adj + N22 - N21) / STEP2;
5633 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5634 if (cond1 is <)
5635 adj = STEP1 - 1;
5636 else
5637 adj = STEP1 + 1;
5638 count1 = (adj + N12 - N11) / STEP1;
5639 count = count1 * count2 * count3;
5640 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5641 count = 0;
5642 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5643 of the combined loop constructs, just initialize COUNTS array
5644 from the _looptemp_ clauses. */
5646 /* NOTE: It *could* be better to moosh all of the BBs together,
5647 creating one larger BB with all the computation and the unexpected
5648 jump at the end. I.e.
5650 bool zero3, zero2, zero1, zero;
5652 zero3 = N32 c3 N31;
5653 count3 = (N32 - N31) /[cl] STEP3;
5654 zero2 = N22 c2 N21;
5655 count2 = (N22 - N21) /[cl] STEP2;
5656 zero1 = N12 c1 N11;
5657 count1 = (N12 - N11) /[cl] STEP1;
5658 zero = zero3 || zero2 || zero1;
5659 count = count1 * count2 * count3;
5660 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5662 After all, we expect zero to be false, and thus we expect to have to
5663 evaluate all of the comparison expressions, so short-circuiting
5664 oughtn't be a win. Since the condition isn't protecting a
5665 denominator, we're not concerned about divide-by-zero, so we can
5666 fully evaluate count even if a numerator turned out to be wrong.
5668 It seems like putting this all together would create much better
5669 scheduling opportunities, and less pressure on the chip's branch
5670 predictor. */
5672 static void
5673 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5674 basic_block &entry_bb, tree *counts,
5675 basic_block &zero_iter_bb, int &first_zero_iter,
5676 basic_block &l2_dom_bb)
5678 tree t, type = TREE_TYPE (fd->loop.v);
5679 edge e, ne;
5680 int i;
5682 /* Collapsed loops need work for expansion into SSA form. */
5683 gcc_assert (!gimple_in_ssa_p (cfun));
5685 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5686 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5688 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5689 isn't supposed to be handled, as the inner loop doesn't
5690 use it. */
5691 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5692 OMP_CLAUSE__LOOPTEMP_);
5693 gcc_assert (innerc);
5694 for (i = 0; i < fd->collapse; i++)
5696 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5697 OMP_CLAUSE__LOOPTEMP_);
5698 gcc_assert (innerc);
5699 if (i)
5700 counts[i] = OMP_CLAUSE_DECL (innerc);
5701 else
5702 counts[0] = NULL_TREE;
5704 return;
5707 for (i = 0; i < fd->collapse; i++)
5709 tree itype = TREE_TYPE (fd->loops[i].v);
5711 if (SSA_VAR_P (fd->loop.n2)
5712 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5713 fold_convert (itype, fd->loops[i].n1),
5714 fold_convert (itype, fd->loops[i].n2)))
5715 == NULL_TREE || !integer_onep (t)))
5717 gcond *cond_stmt;
5718 tree n1, n2;
5719 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5720 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5721 true, GSI_SAME_STMT);
5722 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5723 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5724 true, GSI_SAME_STMT);
5725 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5726 NULL_TREE, NULL_TREE);
5727 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
5728 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
5729 expand_omp_regimplify_p, NULL, NULL)
5730 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
5731 expand_omp_regimplify_p, NULL, NULL))
5733 *gsi = gsi_for_stmt (cond_stmt);
5734 gimple_regimplify_operands (cond_stmt, gsi);
5736 e = split_block (entry_bb, cond_stmt);
5737 if (zero_iter_bb == NULL)
5739 gassign *assign_stmt;
5740 first_zero_iter = i;
5741 zero_iter_bb = create_empty_bb (entry_bb);
5742 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5743 *gsi = gsi_after_labels (zero_iter_bb);
5744 assign_stmt = gimple_build_assign (fd->loop.n2,
5745 build_zero_cst (type));
5746 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
5747 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5748 entry_bb);
5750 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5751 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5752 e->flags = EDGE_TRUE_VALUE;
5753 e->probability = REG_BR_PROB_BASE - ne->probability;
5754 if (l2_dom_bb == NULL)
5755 l2_dom_bb = entry_bb;
5756 entry_bb = e->dest;
5757 *gsi = gsi_last_bb (entry_bb);
5760 if (POINTER_TYPE_P (itype))
5761 itype = signed_type_for (itype);
5762 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5763 ? -1 : 1));
5764 t = fold_build2 (PLUS_EXPR, itype,
5765 fold_convert (itype, fd->loops[i].step), t);
5766 t = fold_build2 (PLUS_EXPR, itype, t,
5767 fold_convert (itype, fd->loops[i].n2));
5768 t = fold_build2 (MINUS_EXPR, itype, t,
5769 fold_convert (itype, fd->loops[i].n1));
5770 /* ?? We could probably use CEIL_DIV_EXPR instead of
5771 TRUNC_DIV_EXPR and adjust by hand, unless we can't
5772 generate the same code in the end because generically we
5773 don't know that the values involved must be negative for
5774 GT. ?? */
5775 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5776 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5777 fold_build1 (NEGATE_EXPR, itype, t),
5778 fold_build1 (NEGATE_EXPR, itype,
5779 fold_convert (itype,
5780 fd->loops[i].step)));
5781 else
5782 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5783 fold_convert (itype, fd->loops[i].step));
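/* Worked example (hypothetical values) of the unsigned GT case above:
   for N1 = 10, N2 = 0, STEP = -2 computed in a 32-bit unsigned ITYPE,
   t = (STEP + 1) + N2 - N1 = -11 = 0xfffffff5, and
   -t / -STEP = 11 / 2 = 5, the expected iteration count, whereas the
   plain unsigned t / STEP would be 0xfffffff5 / 0xfffffffe = 0.  */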
5784 t = fold_convert (type, t);
5785 if (TREE_CODE (t) == INTEGER_CST)
5786 counts[i] = t;
5787 else
5789 counts[i] = create_tmp_reg (type, ".count");
5790 expand_omp_build_assign (gsi, counts[i], t);
5792 if (SSA_VAR_P (fd->loop.n2))
5794 if (i == 0)
5795 t = counts[0];
5796 else
5797 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5798 expand_omp_build_assign (gsi, fd->loop.n2, t);
5804 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5805 T = V;
5806 V3 = N31 + (T % count3) * STEP3;
5807 T = T / count3;
5808 V2 = N21 + (T % count2) * STEP2;
5809 T = T / count2;
5810 V1 = N11 + T * STEP1;
5811 if this loop doesn't have an inner loop construct combined with it.
5812 If it does have an inner loop construct combined with it and the
5813 iteration count isn't known constant, store values from counts array
5814 into its _looptemp_ temporaries instead. */
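/* A minimal sketch of the decomposition above in plain C, for a
   hypothetical collapse(3) nest; COUNT2 and COUNT3 are the per-loop
   trip counts computed by expand_omp_for_init_counts.  The innermost
   index varies fastest, so it is peeled off first.  */
#if 0
static void
example_init_vars (unsigned long long t /* = V */,
		   unsigned long long count2, unsigned long long count3,
		   long n11, long step1, long n21, long step2,
		   long n31, long step3,
		   long *v1, long *v2, long *v3)
{
  *v3 = n31 + (long) (t % count3) * step3;
  t = t / count3;
  *v2 = n21 + (long) (t % count2) * step2;
  t = t / count2;
  *v1 = n11 + (long) t * step1;
}
#endif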
5816 static void
5817 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5818 tree *counts, gimple inner_stmt, tree startvar)
5820 int i;
5821 if (gimple_omp_for_combined_p (fd->for_stmt))
5823 /* If fd->loop.n2 is constant, then no propagation of the counts
5824 is needed; they are constant. */
5825 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5826 return;
5828 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5829 ? gimple_omp_parallel_clauses (inner_stmt)
5830 : gimple_omp_for_clauses (inner_stmt);
5831 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
5832 isn't supposed to be handled, as the inner loop doesn't
5833 use it. */
5834 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5835 gcc_assert (innerc);
5836 for (i = 0; i < fd->collapse; i++)
5838 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5839 OMP_CLAUSE__LOOPTEMP_);
5840 gcc_assert (innerc);
5841 if (i)
5843 tree tem = OMP_CLAUSE_DECL (innerc);
5844 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5845 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5846 false, GSI_CONTINUE_LINKING);
5847 gassign *stmt = gimple_build_assign (tem, t);
5848 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5851 return;
5854 tree type = TREE_TYPE (fd->loop.v);
5855 tree tem = create_tmp_reg (type, ".tem");
5856 gassign *stmt = gimple_build_assign (tem, startvar);
5857 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5859 for (i = fd->collapse - 1; i >= 0; i--)
5861 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5862 itype = vtype;
5863 if (POINTER_TYPE_P (vtype))
5864 itype = signed_type_for (vtype);
5865 if (i != 0)
5866 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5867 else
5868 t = tem;
5869 t = fold_convert (itype, t);
5870 t = fold_build2 (MULT_EXPR, itype, t,
5871 fold_convert (itype, fd->loops[i].step));
5872 if (POINTER_TYPE_P (vtype))
5873 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5874 else
5875 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5876 t = force_gimple_operand_gsi (gsi, t,
5877 DECL_P (fd->loops[i].v)
5878 && TREE_ADDRESSABLE (fd->loops[i].v),
5879 NULL_TREE, false,
5880 GSI_CONTINUE_LINKING);
5881 stmt = gimple_build_assign (fd->loops[i].v, t);
5882 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5883 if (i != 0)
5885 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5886 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5887 false, GSI_CONTINUE_LINKING);
5888 stmt = gimple_build_assign (tem, t);
5889 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5895 /* Helper function for expand_omp_for_*. Generate code like:
5896 L10:
5897 V3 += STEP3;
5898 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5899 L11:
5900 V3 = N31;
5901 V2 += STEP2;
5902 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5903 L12:
5904 V2 = N21;
5905 V1 += STEP1;
5906 goto BODY_BB; */
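/* The same update logic as a plain C sketch for a hypothetical
   collapse(3) nest with "<" conditions: an odometer-style increment
   that bumps the innermost variable and carries into the outer ones
   when its bound is reached.  */
#if 0
static void
example_update_vars (long *v1, long step1,
		     long *v2, long n21, long n22, long step2,
		     long *v3, long n31, long n32, long step3)
{
  *v3 += step3;
  if (*v3 < n32)
    return;			/* goto BODY_BB */
  *v3 = n31;
  *v2 += step2;
  if (*v2 < n22)
    return;			/* goto BODY_BB */
  *v2 = n21;
  *v1 += step1;			/* the outermost test happens elsewhere */
}
#endif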
5908 static basic_block
5909 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5910 basic_block body_bb)
5912 basic_block last_bb, bb, collapse_bb = NULL;
5913 int i;
5914 gimple_stmt_iterator gsi;
5915 edge e;
5916 tree t;
5917 gimple stmt;
5919 last_bb = cont_bb;
5920 for (i = fd->collapse - 1; i >= 0; i--)
5922 tree vtype = TREE_TYPE (fd->loops[i].v);
5924 bb = create_empty_bb (last_bb);
5925 add_bb_to_loop (bb, last_bb->loop_father);
5926 gsi = gsi_start_bb (bb);
5928 if (i < fd->collapse - 1)
5930 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5931 e->probability = REG_BR_PROB_BASE / 8;
5933 t = fd->loops[i + 1].n1;
5934 t = force_gimple_operand_gsi (&gsi, t,
5935 DECL_P (fd->loops[i + 1].v)
5936 && TREE_ADDRESSABLE (fd->loops[i + 1].v),
5938 NULL_TREE, false,
5939 GSI_CONTINUE_LINKING);
5940 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5941 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5943 else
5944 collapse_bb = bb;
5946 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5948 if (POINTER_TYPE_P (vtype))
5949 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5950 else
5951 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5952 t = force_gimple_operand_gsi (&gsi, t,
5953 DECL_P (fd->loops[i].v)
5954 && TREE_ADDRESSABLE (fd->loops[i].v),
5955 NULL_TREE, false, GSI_CONTINUE_LINKING);
5956 stmt = gimple_build_assign (fd->loops[i].v, t);
5957 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5959 if (i > 0)
5961 t = fd->loops[i].n2;
5962 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5963 false, GSI_CONTINUE_LINKING);
5964 tree v = fd->loops[i].v;
5965 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5966 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5967 false, GSI_CONTINUE_LINKING);
5968 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5969 stmt = gimple_build_cond_empty (t);
5970 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5971 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5972 e->probability = REG_BR_PROB_BASE * 7 / 8;
5974 else
5975 make_edge (bb, body_bb, EDGE_FALLTHRU);
5976 last_bb = bb;
5979 return collapse_bb;
5983 /* A subroutine of expand_omp_for. Generate code for a parallel
5984 loop with any schedule. Given parameters:
5986 for (V = N1; V cond N2; V += STEP) BODY;
5988 where COND is "<" or ">", we generate pseudocode
5990 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5991 if (more) goto L0; else goto L3;
5993 V = istart0;
5994 iend = iend0;
5996 BODY;
5997 V += STEP;
5998 if (V cond iend) goto L1; else goto L2;
6000 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
6003 If this is a combined omp parallel loop, instead of the call to
6004 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
6005 If this is a gimple_omp_for_combined_p loop, then instead of assigning
6006 V and iend in L0 we assign the first two _looptemp_ clause decls of the
6007 inner GIMPLE_OMP_FOR and V += STEP; and
6008 if (V cond iend) goto L1; else goto L2; are removed.
6010 For collapsed loops, given parameters:
6011 collapse(3)
6012 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6013 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6014 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6015 BODY;
6017 we generate pseudocode
6019 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
6020 if (cond3 is <)
6021 adj = STEP3 - 1;
6022 else
6023 adj = STEP3 + 1;
6024 count3 = (adj + N32 - N31) / STEP3;
6025 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
6026 if (cond2 is <)
6027 adj = STEP2 - 1;
6028 else
6029 adj = STEP2 + 1;
6030 count2 = (adj + N22 - N21) / STEP2;
6031 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
6032 if (cond1 is <)
6033 adj = STEP1 - 1;
6034 else
6035 adj = STEP1 + 1;
6036 count1 = (adj + N12 - N11) / STEP1;
6037 count = count1 * count2 * count3;
6038 goto Z1;
6040 count = 0;
6042 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
6043 if (more) goto L0; else goto L3;
6045 V = istart0;
6046 T = V;
6047 V3 = N31 + (T % count3) * STEP3;
6048 T = T / count3;
6049 V2 = N21 + (T % count2) * STEP2;
6050 T = T / count2;
6051 V1 = N11 + T * STEP1;
6052 iend = iend0;
6054 BODY;
6055 V += 1;
6056 if (V < iend) goto L10; else goto L2;
6057 L10:
6058 V3 += STEP3;
6059 if (V3 cond3 N32) goto L1; else goto L11;
6060 L11:
6061 V3 = N31;
6062 V2 += STEP2;
6063 if (V2 cond2 N22) goto L1; else goto L12;
6064 L12:
6065 V2 = N21;
6066 V1 += STEP1;
6067 goto L1;
6069 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
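/* A hand-written analogue of the non-collapsed pseudocode above, using
   the libgomp "dynamic" flavor as one possible start_fn/next_fn pair.
   This is a sketch assuming a "<" loop and long iteration type, not the
   exact generated code.  */
#if 0
#include <stdbool.h>
extern bool GOMP_loop_dynamic_start (long, long, long, long, long *, long *);
extern bool GOMP_loop_dynamic_next (long *, long *);
extern void GOMP_loop_end (void);

static void
example_generic_loop (long n1, long n2, long step, long chunk)
{
  long istart0, iend0;
  if (GOMP_loop_dynamic_start (n1, n2, step, chunk, &istart0, &iend0))
    {
      do
	{
	  for (long v = istart0; v < iend0; v += step)
	    /* BODY */;
	}
      while (GOMP_loop_dynamic_next (&istart0, &iend0));
    }
  GOMP_loop_end ();
}
#endif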
6074 static void
6075 expand_omp_for_generic (struct omp_region *region,
6076 struct omp_for_data *fd,
6077 enum built_in_function start_fn,
6078 enum built_in_function next_fn,
6079 gimple inner_stmt)
6081 tree type, istart0, iend0, iend;
6082 tree t, vmain, vback, bias = NULL_TREE;
6083 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
6084 basic_block l2_bb = NULL, l3_bb = NULL;
6085 gimple_stmt_iterator gsi;
6086 gassign *assign_stmt;
6087 bool in_combined_parallel = is_combined_parallel (region);
6088 bool broken_loop = region->cont == NULL;
6089 edge e, ne;
6090 tree *counts = NULL;
6091 int i;
6093 gcc_assert (!broken_loop || !in_combined_parallel);
6094 gcc_assert (fd->iter_type == long_integer_type_node
6095 || !in_combined_parallel);
6097 type = TREE_TYPE (fd->loop.v);
6098 istart0 = create_tmp_var (fd->iter_type, ".istart0");
6099 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6100 TREE_ADDRESSABLE (istart0) = 1;
6101 TREE_ADDRESSABLE (iend0) = 1;
6103 /* See if we need to bias by LLONG_MIN. */
6104 if (fd->iter_type == long_long_unsigned_type_node
6105 && TREE_CODE (type) == INTEGER_TYPE
6106 && !TYPE_UNSIGNED (type))
6108 tree n1, n2;
6110 if (fd->loop.cond_code == LT_EXPR)
6112 n1 = fd->loop.n1;
6113 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
6115 else
6117 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
6118 n2 = fd->loop.n1;
6120 if (TREE_CODE (n1) != INTEGER_CST
6121 || TREE_CODE (n2) != INTEGER_CST
6122 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
6123 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
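/* Illustration with hypothetical values: for a signed 64-bit TYPE
   iterated through the unsigned long long ITER_TYPE, BIAS is LLONG_MIN
   reinterpreted as unsigned.  For any signed a < b,
   (unsigned long long) a + bias < (unsigned long long) b + bias,
   so the runtime's unsigned comparisons order the biased values the
   same way the signed originals were ordered.  The bias is subtracted
   again when istart0/iend0 are copied back into V below.  */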
6126 entry_bb = region->entry;
6127 cont_bb = region->cont;
6128 collapse_bb = NULL;
6129 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6130 gcc_assert (broken_loop
6131 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6132 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6133 l1_bb = single_succ (l0_bb);
6134 if (!broken_loop)
6136 l2_bb = create_empty_bb (cont_bb);
6137 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
6138 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6140 else
6141 l2_bb = NULL;
6142 l3_bb = BRANCH_EDGE (entry_bb)->dest;
6143 exit_bb = region->exit;
6145 gsi = gsi_last_bb (entry_bb);
6147 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6148 if (fd->collapse > 1)
6150 int first_zero_iter = -1;
6151 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
6153 counts = XALLOCAVEC (tree, fd->collapse);
6154 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6155 zero_iter_bb, first_zero_iter,
6156 l2_dom_bb);
6158 if (zero_iter_bb)
6160 /* Some counts[i] vars might be uninitialized if
6161 some loop has zero iterations. But the body shouldn't
6162 be executed in that case, so just avoid uninit warnings. */
6163 for (i = first_zero_iter; i < fd->collapse; i++)
6164 if (SSA_VAR_P (counts[i]))
6165 TREE_NO_WARNING (counts[i]) = 1;
6166 gsi_prev (&gsi);
6167 e = split_block (entry_bb, gsi_stmt (gsi));
6168 entry_bb = e->dest;
6169 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
6170 gsi = gsi_last_bb (entry_bb);
6171 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
6172 get_immediate_dominator (CDI_DOMINATORS,
6173 zero_iter_bb));
6176 if (in_combined_parallel)
6178 /* In a combined parallel loop, emit a call to
6179 GOMP_loop_foo_next. */
6180 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
6181 build_fold_addr_expr (istart0),
6182 build_fold_addr_expr (iend0));
6184 else
6186 tree t0, t1, t2, t3, t4;
6187 /* If this is not a combined parallel loop, emit a call to
6188 GOMP_loop_foo_start in ENTRY_BB. */
6189 t4 = build_fold_addr_expr (iend0);
6190 t3 = build_fold_addr_expr (istart0);
6191 t2 = fold_convert (fd->iter_type, fd->loop.step);
6192 t1 = fd->loop.n2;
6193 t0 = fd->loop.n1;
6194 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6196 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6197 OMP_CLAUSE__LOOPTEMP_);
6198 gcc_assert (innerc);
6199 t0 = OMP_CLAUSE_DECL (innerc);
6200 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6201 OMP_CLAUSE__LOOPTEMP_);
6202 gcc_assert (innerc);
6203 t1 = OMP_CLAUSE_DECL (innerc);
6205 if (POINTER_TYPE_P (TREE_TYPE (t0))
6206 && TYPE_PRECISION (TREE_TYPE (t0))
6207 != TYPE_PRECISION (fd->iter_type))
6209 /* Avoid casting pointers to an integer of a different size. */
6210 tree itype = signed_type_for (type);
6211 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
6212 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
6214 else
6216 t1 = fold_convert (fd->iter_type, t1);
6217 t0 = fold_convert (fd->iter_type, t0);
6219 if (bias)
6221 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
6222 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
6224 if (fd->iter_type == long_integer_type_node)
6226 if (fd->chunk_size)
6228 t = fold_convert (fd->iter_type, fd->chunk_size);
6229 t = build_call_expr (builtin_decl_explicit (start_fn),
6230 6, t0, t1, t2, t, t3, t4);
6232 else
6233 t = build_call_expr (builtin_decl_explicit (start_fn),
6234 5, t0, t1, t2, t3, t4);
6236 else
6238 tree t5;
6239 tree c_bool_type;
6240 tree bfn_decl;
6242 /* The GOMP_loop_ull_*start functions have an additional boolean
6243 argument: true for < loops and false for > loops.
6244 In Fortran, the C bool type can be different from
6245 boolean_type_node. */
6246 bfn_decl = builtin_decl_explicit (start_fn);
6247 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
6248 t5 = build_int_cst (c_bool_type,
6249 fd->loop.cond_code == LT_EXPR ? 1 : 0);
6250 if (fd->chunk_size)
6252 /* bfn_decl was already looked up above; reuse it. */
6253 t = fold_convert (fd->iter_type, fd->chunk_size);
6254 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
6256 else
6257 t = build_call_expr (builtin_decl_explicit (start_fn),
6258 6, t5, t0, t1, t2, t3, t4);
6261 if (TREE_TYPE (t) != boolean_type_node)
6262 t = fold_build2 (NE_EXPR, boolean_type_node,
6263 t, build_int_cst (TREE_TYPE (t), 0));
6264 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6265 true, GSI_SAME_STMT);
6266 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6268 /* Remove the GIMPLE_OMP_FOR statement. */
6269 gsi_remove (&gsi, true);
6271 /* Iteration setup for sequential loop goes in L0_BB. */
6272 tree startvar = fd->loop.v;
6273 tree endvar = NULL_TREE;
6275 if (gimple_omp_for_combined_p (fd->for_stmt))
6277 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
6278 && gimple_omp_for_kind (inner_stmt)
6279 == GF_OMP_FOR_KIND_SIMD);
6280 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
6281 OMP_CLAUSE__LOOPTEMP_);
6282 gcc_assert (innerc);
6283 startvar = OMP_CLAUSE_DECL (innerc);
6284 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6285 OMP_CLAUSE__LOOPTEMP_);
6286 gcc_assert (innerc);
6287 endvar = OMP_CLAUSE_DECL (innerc);
6290 gsi = gsi_start_bb (l0_bb);
6291 t = istart0;
6292 if (bias)
6293 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
6294 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
6295 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
6296 t = fold_convert (TREE_TYPE (startvar), t);
6297 t = force_gimple_operand_gsi (&gsi, t,
6298 DECL_P (startvar)
6299 && TREE_ADDRESSABLE (startvar),
6300 NULL_TREE, false, GSI_CONTINUE_LINKING);
6301 assign_stmt = gimple_build_assign (startvar, t);
6302 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6304 t = iend0;
6305 if (bias)
6306 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
6307 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
6308 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
6309 t = fold_convert (TREE_TYPE (startvar), t);
6310 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6311 false, GSI_CONTINUE_LINKING);
6312 if (endvar)
6314 assign_stmt = gimple_build_assign (endvar, iend);
6315 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6316 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
6317 assign_stmt = gimple_build_assign (fd->loop.v, iend);
6318 else
6319 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
6320 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6322 if (fd->collapse > 1)
6323 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6325 if (!broken_loop)
6327 /* Code to control the increment and predicate for the sequential
6328 loop goes in the CONT_BB. */
6329 gsi = gsi_last_bb (cont_bb);
6330 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
6331 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
6332 vmain = gimple_omp_continue_control_use (cont_stmt);
6333 vback = gimple_omp_continue_control_def (cont_stmt);
6335 if (!gimple_omp_for_combined_p (fd->for_stmt))
6337 if (POINTER_TYPE_P (type))
6338 t = fold_build_pointer_plus (vmain, fd->loop.step);
6339 else
6340 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
6341 t = force_gimple_operand_gsi (&gsi, t,
6342 DECL_P (vback)
6343 && TREE_ADDRESSABLE (vback),
6344 NULL_TREE, true, GSI_SAME_STMT);
6345 assign_stmt = gimple_build_assign (vback, t);
6346 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6348 t = build2 (fd->loop.cond_code, boolean_type_node,
6349 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
6350 iend);
6351 gcond *cond_stmt = gimple_build_cond_empty (t);
6352 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6355 /* Remove GIMPLE_OMP_CONTINUE. */
6356 gsi_remove (&gsi, true);
6358 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6359 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
6361 /* Emit code to get the next parallel iteration in L2_BB. */
6362 gsi = gsi_start_bb (l2_bb);
6364 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
6365 build_fold_addr_expr (istart0),
6366 build_fold_addr_expr (iend0));
6367 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6368 false, GSI_CONTINUE_LINKING);
6369 if (TREE_TYPE (t) != boolean_type_node)
6370 t = fold_build2 (NE_EXPR, boolean_type_node,
6371 t, build_int_cst (TREE_TYPE (t), 0));
6372 gcond *cond_stmt = gimple_build_cond_empty (t);
6373 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
6376 /* Add the loop cleanup function. */
6377 gsi = gsi_last_bb (exit_bb);
6378 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6379 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
6380 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
6381 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
6382 else
6383 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
6384 gcall *call_stmt = gimple_build_call (t, 0);
6385 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
6386 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
6387 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
6388 gsi_remove (&gsi, true);
6390 /* Connect the new blocks. */
6391 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
6392 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
6394 if (!broken_loop)
6396 gimple_seq phis;
6398 e = find_edge (cont_bb, l3_bb);
6399 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
6401 phis = phi_nodes (l3_bb);
6402 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
6404 gimple phi = gsi_stmt (gsi);
6405 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
6406 PHI_ARG_DEF_FROM_EDGE (phi, e));
6408 remove_edge (e);
6410 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
6411 add_bb_to_loop (l2_bb, cont_bb->loop_father);
6412 e = find_edge (cont_bb, l1_bb);
6413 if (gimple_omp_for_combined_p (fd->for_stmt))
6415 remove_edge (e);
6416 e = NULL;
6418 else if (fd->collapse > 1)
6420 remove_edge (e);
6421 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6423 else
6424 e->flags = EDGE_TRUE_VALUE;
6425 if (e)
6427 e->probability = REG_BR_PROB_BASE * 7 / 8;
6428 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
6430 else
6432 e = find_edge (cont_bb, l2_bb);
6433 e->flags = EDGE_FALLTHRU;
6435 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
6437 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
6438 recompute_dominator (CDI_DOMINATORS, l2_bb));
6439 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
6440 recompute_dominator (CDI_DOMINATORS, l3_bb));
6441 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
6442 recompute_dominator (CDI_DOMINATORS, l0_bb));
6443 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
6444 recompute_dominator (CDI_DOMINATORS, l1_bb));
6446 struct loop *outer_loop = alloc_loop ();
6447 outer_loop->header = l0_bb;
6448 outer_loop->latch = l2_bb;
6449 add_loop (outer_loop, l0_bb->loop_father);
6451 if (!gimple_omp_for_combined_p (fd->for_stmt))
6453 struct loop *loop = alloc_loop ();
6454 loop->header = l1_bb;
6455 /* The loop may have multiple latches. */
6456 add_loop (loop, outer_loop);
6462 /* A subroutine of expand_omp_for. Generate code for a parallel
6463 loop with static schedule and no specified chunk size. Given
6464 parameters:
6466 for (V = N1; V cond N2; V += STEP) BODY;
6468 where COND is "<" or ">", we generate pseudocode
6470 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6471 if (cond is <)
6472 adj = STEP - 1;
6473 else
6474 adj = STEP + 1;
6475 if ((__typeof (V)) -1 > 0 && cond is >)
6476 n = -(adj + N2 - N1) / -STEP;
6477 else
6478 n = (adj + N2 - N1) / STEP;
6479 q = n / nthreads;
6480 tt = n % nthreads;
6481 if (threadid < tt) goto L3; else goto L4;
6483 tt = 0;
6484 q = q + 1;
6486 s0 = q * threadid + tt;
6487 e0 = s0 + q;
6488 V = s0 * STEP + N1;
6489 if (s0 >= e0) goto L2; else goto L0;
6491 e = e0 * STEP + N1;
6493 BODY;
6494 V += STEP;
6495 if (V cond e) goto L1;
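/* A minimal standalone rendering of the partitioning arithmetic above
   (hypothetical helper): N iterations are split into NTHREADS nearly
   equal blocks, with the first n % nthreads threads taking one extra
   iteration each; thread THREADID executes [*s0, *e0).  For example,
   n = 10, nthreads = 4 yields [0,3), [3,6), [6,8), [8,10).  */
#if 0
static void
example_static_nochunk (unsigned long long n, unsigned long long nthreads,
			unsigned long long threadid,
			unsigned long long *s0, unsigned long long *e0)
{
  unsigned long long q = n / nthreads;
  unsigned long long tt = n % nthreads;
  if (threadid < tt)
    {
      tt = 0;
      q = q + 1;
    }
  *s0 = q * threadid + tt;
  *e0 = *s0 + q;
}
#endif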
6499 static void
6500 expand_omp_for_static_nochunk (struct omp_region *region,
6501 struct omp_for_data *fd,
6502 gimple inner_stmt)
6504 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
6505 tree type, itype, vmain, vback;
6506 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
6507 basic_block body_bb, cont_bb, collapse_bb = NULL;
6508 basic_block fin_bb;
6509 gimple_stmt_iterator gsi;
6510 edge ep;
6511 bool broken_loop = region->cont == NULL;
6512 tree *counts = NULL;
6513 tree n1, n2, step;
6515 gcc_checking_assert ((gimple_omp_for_kind (fd->for_stmt)
6516 != GF_OMP_FOR_KIND_OACC_LOOP)
6517 || !inner_stmt);
6519 itype = type = TREE_TYPE (fd->loop.v);
6520 if (POINTER_TYPE_P (type))
6521 itype = signed_type_for (type);
6523 entry_bb = region->entry;
6524 cont_bb = region->cont;
6525 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6526 fin_bb = BRANCH_EDGE (entry_bb)->dest;
6527 gcc_assert (broken_loop
6528 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
6529 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6530 body_bb = single_succ (seq_start_bb);
6531 if (!broken_loop)
6533 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6534 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6536 exit_bb = region->exit;
6538 /* Iteration space partitioning goes in ENTRY_BB. */
6539 gsi = gsi_last_bb (entry_bb);
6540 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6542 if (fd->collapse > 1)
6544 int first_zero_iter = -1;
6545 basic_block l2_dom_bb = NULL;
6547 counts = XALLOCAVEC (tree, fd->collapse);
6548 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6549 fin_bb, first_zero_iter,
6550 l2_dom_bb);
6551 t = NULL_TREE;
6553 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6554 t = integer_one_node;
6555 else
6556 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6557 fold_convert (type, fd->loop.n1),
6558 fold_convert (type, fd->loop.n2));
6559 if (fd->collapse == 1
6560 && TYPE_UNSIGNED (type)
6561 && (t == NULL_TREE || !integer_onep (t)))
6563 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6564 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6565 true, GSI_SAME_STMT);
6566 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6567 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6568 true, GSI_SAME_STMT);
6569 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6570 NULL_TREE, NULL_TREE);
6571 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6572 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6573 expand_omp_regimplify_p, NULL, NULL)
6574 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6575 expand_omp_regimplify_p, NULL, NULL))
6577 gsi = gsi_for_stmt (cond_stmt);
6578 gimple_regimplify_operands (cond_stmt, &gsi);
6580 ep = split_block (entry_bb, cond_stmt);
6581 ep->flags = EDGE_TRUE_VALUE;
6582 entry_bb = ep->dest;
6583 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6584 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
6585 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
6586 if (gimple_in_ssa_p (cfun))
6588 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6589 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
6590 !gsi_end_p (gpi); gsi_next (&gpi))
6592 gphi *phi = gpi.phi ();
6593 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6594 ep, UNKNOWN_LOCATION);
6597 gsi = gsi_last_bb (entry_bb);
6600 switch (gimple_omp_for_kind (fd->for_stmt))
6602 case GF_OMP_FOR_KIND_FOR:
6603 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
6604 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6605 break;
6606 case GF_OMP_FOR_KIND_DISTRIBUTE:
6607 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
6608 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
6609 break;
6610 case GF_OMP_FOR_KIND_OACC_LOOP:
6611 nthreads = builtin_decl_explicit (BUILT_IN_GOACC_GET_NUM_THREADS);
6612 threadid = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM);
6613 break;
6614 default:
6615 gcc_unreachable ();
6617 nthreads = build_call_expr (nthreads, 0);
6618 nthreads = fold_convert (itype, nthreads);
6619 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
6620 true, GSI_SAME_STMT);
6621 threadid = build_call_expr (threadid, 0);
6622 threadid = fold_convert (itype, threadid);
6623 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
6624 true, GSI_SAME_STMT);
6626 n1 = fd->loop.n1;
6627 n2 = fd->loop.n2;
6628 step = fd->loop.step;
6629 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6631 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6632 OMP_CLAUSE__LOOPTEMP_);
6633 gcc_assert (innerc);
6634 n1 = OMP_CLAUSE_DECL (innerc);
6635 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6636 OMP_CLAUSE__LOOPTEMP_);
6637 gcc_assert (innerc);
6638 n2 = OMP_CLAUSE_DECL (innerc);
6640 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6641 true, NULL_TREE, true, GSI_SAME_STMT);
6642 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6643 true, NULL_TREE, true, GSI_SAME_STMT);
6644 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6645 true, NULL_TREE, true, GSI_SAME_STMT);
6647 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6648 t = fold_build2 (PLUS_EXPR, itype, step, t);
6649 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6650 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6651 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6652 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6653 fold_build1 (NEGATE_EXPR, itype, t),
6654 fold_build1 (NEGATE_EXPR, itype, step));
6655 else
6656 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6657 t = fold_convert (itype, t);
6658 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6660 q = create_tmp_reg (itype, "q");
6661 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
6662 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6663 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
6665 tt = create_tmp_reg (itype, "tt");
6666 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
6667 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6668 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
6670 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
6671 gcond *cond_stmt = gimple_build_cond_empty (t);
6672 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6674 second_bb = split_block (entry_bb, cond_stmt)->dest;
6675 gsi = gsi_last_bb (second_bb);
6676 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6678 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
6679 GSI_SAME_STMT);
6680 gassign *assign_stmt
6681 = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1));
6682 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6684 third_bb = split_block (second_bb, assign_stmt)->dest;
6685 gsi = gsi_last_bb (third_bb);
6686 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6688 t = build2 (MULT_EXPR, itype, q, threadid);
6689 t = build2 (PLUS_EXPR, itype, t, tt);
6690 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6692 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6693 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6695 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6696 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6698 /* Remove the GIMPLE_OMP_FOR statement. */
6699 gsi_remove (&gsi, true);
6701 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6702 gsi = gsi_start_bb (seq_start_bb);
6704 tree startvar = fd->loop.v;
6705 tree endvar = NULL_TREE;
6707 if (gimple_omp_for_combined_p (fd->for_stmt))
6709 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6710 ? gimple_omp_parallel_clauses (inner_stmt)
6711 : gimple_omp_for_clauses (inner_stmt);
6712 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6713 gcc_assert (innerc);
6714 startvar = OMP_CLAUSE_DECL (innerc);
6715 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6716 OMP_CLAUSE__LOOPTEMP_);
6717 gcc_assert (innerc);
6718 endvar = OMP_CLAUSE_DECL (innerc);
6720 t = fold_convert (itype, s0);
6721 t = fold_build2 (MULT_EXPR, itype, t, step);
6722 if (POINTER_TYPE_P (type))
6723 t = fold_build_pointer_plus (n1, t);
6724 else
6725 t = fold_build2 (PLUS_EXPR, type, t, n1);
6726 t = fold_convert (TREE_TYPE (startvar), t);
6727 t = force_gimple_operand_gsi (&gsi, t,
6728 DECL_P (startvar)
6729 && TREE_ADDRESSABLE (startvar),
6730 NULL_TREE, false, GSI_CONTINUE_LINKING);
6731 assign_stmt = gimple_build_assign (startvar, t);
6732 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6734 t = fold_convert (itype, e0);
6735 t = fold_build2 (MULT_EXPR, itype, t, step);
6736 if (POINTER_TYPE_P (type))
6737 t = fold_build_pointer_plus (n1, t);
6738 else
6739 t = fold_build2 (PLUS_EXPR, type, t, n1);
6740 t = fold_convert (TREE_TYPE (startvar), t);
6741 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6742 false, GSI_CONTINUE_LINKING);
6743 if (endvar)
6745 assign_stmt = gimple_build_assign (endvar, e);
6746 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6747 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6748 assign_stmt = gimple_build_assign (fd->loop.v, e);
6749 else
6750 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
6751 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6753 if (fd->collapse > 1)
6754 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6756 if (!broken_loop)
6758 /* The code controlling the sequential loop replaces the
6759 GIMPLE_OMP_CONTINUE. */
6760 gsi = gsi_last_bb (cont_bb);
6761 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
6762 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
6763 vmain = gimple_omp_continue_control_use (cont_stmt);
6764 vback = gimple_omp_continue_control_def (cont_stmt);
6766 if (!gimple_omp_for_combined_p (fd->for_stmt))
6768 if (POINTER_TYPE_P (type))
6769 t = fold_build_pointer_plus (vmain, step);
6770 else
6771 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6772 t = force_gimple_operand_gsi (&gsi, t,
6773 DECL_P (vback)
6774 && TREE_ADDRESSABLE (vback),
6775 NULL_TREE, true, GSI_SAME_STMT);
6776 assign_stmt = gimple_build_assign (vback, t);
6777 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6779 t = build2 (fd->loop.cond_code, boolean_type_node,
6780 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6781 ? t : vback, e);
6782 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6785 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6786 gsi_remove (&gsi, true);
6788 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6789 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6792 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6793 gsi = gsi_last_bb (exit_bb);
6794 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6796 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6797 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
6798 gcc_checking_assert (t == NULL_TREE);
6799 else
6800 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6802 gsi_remove (&gsi, true);
6804 /* Connect all the blocks. */
6805 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6806 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6807 ep = find_edge (entry_bb, second_bb);
6808 ep->flags = EDGE_TRUE_VALUE;
6809 ep->probability = REG_BR_PROB_BASE / 4;
6810 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6811 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6813 if (!broken_loop)
6815 ep = find_edge (cont_bb, body_bb);
6816 if (gimple_omp_for_combined_p (fd->for_stmt))
6818 remove_edge (ep);
6819 ep = NULL;
6821 else if (fd->collapse > 1)
6823 remove_edge (ep);
6824 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6826 else
6827 ep->flags = EDGE_TRUE_VALUE;
6828 find_edge (cont_bb, fin_bb)->flags
6829 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6832 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6833 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6834 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6836 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6837 recompute_dominator (CDI_DOMINATORS, body_bb));
6838 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6839 recompute_dominator (CDI_DOMINATORS, fin_bb));
6841 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6843 struct loop *loop = alloc_loop ();
6844 loop->header = body_bb;
6845 if (collapse_bb == NULL)
6846 loop->latch = cont_bb;
6847 add_loop (loop, body_bb->loop_father);
6852 /* A subroutine of expand_omp_for. Generate code for a parallel
6853 loop with static schedule and a specified chunk size. Given
6854 parameters:
6856 for (V = N1; V cond N2; V += STEP) BODY;
6858 where COND is "<" or ">", we generate pseudocode
6860 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6861 if (cond is <)
6862 adj = STEP - 1;
6863 else
6864 adj = STEP + 1;
6865 if ((__typeof (V)) -1 > 0 && cond is >)
6866 n = -(adj + N2 - N1) / -STEP;
6867 else
6868 n = (adj + N2 - N1) / STEP;
6869 trip = 0;
6870 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6871 here so that V is defined
6872 if the loop is not entered
6874 s0 = (trip * nthreads + threadid) * CHUNK;
6875 e0 = min(s0 + CHUNK, n);
6876 if (s0 < n) goto L1; else goto L4;
6878 V = s0 * STEP + N1;
6879 e = e0 * STEP + N1;
6881 BODY;
6882 V += STEP;
6883 if (V cond e) goto L2; else goto L3;
6885 trip += 1;
6886 goto L0;
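/* A plain C sketch of the chunked schedule above (hypothetical helper):
   chunks are dealt out round robin, with chunk number
   trip * nthreads + threadid going to this thread on trip TRIP.  */
#if 0
static void
example_static_chunk (unsigned long long n, unsigned long long nthreads,
		      unsigned long long threadid, unsigned long long chunk)
{
  for (unsigned long long trip = 0; ; trip++)
    {
      unsigned long long s0 = (trip * nthreads + threadid) * chunk;
      if (s0 >= n)
	break;
      unsigned long long e0 = s0 + chunk < n ? s0 + chunk : n;
      for (unsigned long long i = s0; i < e0; i++)
	/* BODY, with V = N1 + i * STEP.  */;
    }
}
#endif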
6890 static void
6891 expand_omp_for_static_chunk (struct omp_region *region,
6892 struct omp_for_data *fd, gimple inner_stmt)
6894 tree n, s0, e0, e, t;
6895 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6896 tree type, itype, vmain, vback, vextra;
6897 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6898 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6899 gimple_stmt_iterator gsi;
6900 edge se;
6901 bool broken_loop = region->cont == NULL;
6902 tree *counts = NULL;
6903 tree n1, n2, step;
6905 gcc_checking_assert ((gimple_omp_for_kind (fd->for_stmt)
6906 != GF_OMP_FOR_KIND_OACC_LOOP)
6907 || !inner_stmt);
6909 itype = type = TREE_TYPE (fd->loop.v);
6910 if (POINTER_TYPE_P (type))
6911 itype = signed_type_for (type);
6913 entry_bb = region->entry;
6914 se = split_block (entry_bb, last_stmt (entry_bb));
6915 entry_bb = se->src;
6916 iter_part_bb = se->dest;
6917 cont_bb = region->cont;
6918 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6919 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6920 gcc_assert (broken_loop
6921 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6922 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6923 body_bb = single_succ (seq_start_bb);
6924 if (!broken_loop)
6926 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6927 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6928 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6930 exit_bb = region->exit;
6932 /* Trip and adjustment setup goes in ENTRY_BB. */
6933 gsi = gsi_last_bb (entry_bb);
6934 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6936 if (fd->collapse > 1)
6938 int first_zero_iter = -1;
6939 basic_block l2_dom_bb = NULL;
6941 counts = XALLOCAVEC (tree, fd->collapse);
6942 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6943 fin_bb, first_zero_iter,
6944 l2_dom_bb);
6945 t = NULL_TREE;
6947 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6948 t = integer_one_node;
6949 else
6950 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6951 fold_convert (type, fd->loop.n1),
6952 fold_convert (type, fd->loop.n2));
6953 if (fd->collapse == 1
6954 && TYPE_UNSIGNED (type)
6955 && (t == NULL_TREE || !integer_onep (t)))
6957 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6958 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6959 true, GSI_SAME_STMT);
6960 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6961 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6962 true, GSI_SAME_STMT);
6963 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6964 NULL_TREE, NULL_TREE);
6965 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6966 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6967 expand_omp_regimplify_p, NULL, NULL)
6968 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6969 expand_omp_regimplify_p, NULL, NULL))
6971 gsi = gsi_for_stmt (cond_stmt);
6972 gimple_regimplify_operands (cond_stmt, &gsi);
6974 se = split_block (entry_bb, cond_stmt);
6975 se->flags = EDGE_TRUE_VALUE;
6976 entry_bb = se->dest;
6977 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6978 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6979 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6980 if (gimple_in_ssa_p (cfun))
6982 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6983 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
6984 !gsi_end_p (gpi); gsi_next (&gpi))
6986 gphi *phi = gpi.phi ();
6987 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6988 se, UNKNOWN_LOCATION);
6991 gsi = gsi_last_bb (entry_bb);
6994 switch (gimple_omp_for_kind (fd->for_stmt))
6996 case GF_OMP_FOR_KIND_FOR:
6997 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
6998 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6999 break;
7000 case GF_OMP_FOR_KIND_DISTRIBUTE:
7001 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
7002 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
7003 break;
7004 case GF_OMP_FOR_KIND_OACC_LOOP:
7005 nthreads = builtin_decl_explicit (BUILT_IN_GOACC_GET_NUM_THREADS);
7006 threadid = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM);
7007 break;
7008 default:
7009 gcc_unreachable ();
7011 nthreads = build_call_expr (nthreads, 0);
7012 nthreads = fold_convert (itype, nthreads);
7013 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
7014 true, GSI_SAME_STMT);
7015 threadid = build_call_expr (threadid, 0);
7016 threadid = fold_convert (itype, threadid);
7017 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
7018 true, GSI_SAME_STMT);
7020 n1 = fd->loop.n1;
7021 n2 = fd->loop.n2;
7022 step = fd->loop.step;
7023 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7025 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7026 OMP_CLAUSE__LOOPTEMP_);
7027 gcc_assert (innerc);
7028 n1 = OMP_CLAUSE_DECL (innerc);
7029 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7030 OMP_CLAUSE__LOOPTEMP_);
7031 gcc_assert (innerc);
7032 n2 = OMP_CLAUSE_DECL (innerc);
7034 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
7035 true, NULL_TREE, true, GSI_SAME_STMT);
7036 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
7037 true, NULL_TREE, true, GSI_SAME_STMT);
7038 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
7039 true, NULL_TREE, true, GSI_SAME_STMT);
7040 fd->chunk_size
7041 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
7042 true, NULL_TREE, true, GSI_SAME_STMT);
7044 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
7045 t = fold_build2 (PLUS_EXPR, itype, step, t);
7046 t = fold_build2 (PLUS_EXPR, itype, t, n2);
7047 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
7048 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
7049 t = fold_build2 (TRUNC_DIV_EXPR, itype,
7050 fold_build1 (NEGATE_EXPR, itype, t),
7051 fold_build1 (NEGATE_EXPR, itype, step));
7052 else
7053 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
7054 t = fold_convert (itype, t);
7055 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7056 true, GSI_SAME_STMT);
7058 trip_var = create_tmp_reg (itype, ".trip");
7059 if (gimple_in_ssa_p (cfun))
7061 trip_init = make_ssa_name (trip_var);
7062 trip_main = make_ssa_name (trip_var);
7063 trip_back = make_ssa_name (trip_var);
7065 else
7067 trip_init = trip_var;
7068 trip_main = trip_var;
7069 trip_back = trip_var;
7072 gassign *assign_stmt
7073 = gimple_build_assign (trip_init, build_int_cst (itype, 0));
7074 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
7076 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
7077 t = fold_build2 (MULT_EXPR, itype, t, step);
7078 if (POINTER_TYPE_P (type))
7079 t = fold_build_pointer_plus (n1, t);
7080 else
7081 t = fold_build2 (PLUS_EXPR, type, t, n1);
7082 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7083 true, GSI_SAME_STMT);
7085 /* Remove the GIMPLE_OMP_FOR. */
7086 gsi_remove (&gsi, true);
7088 /* Iteration space partitioning goes in ITER_PART_BB. */
7089 gsi = gsi_last_bb (iter_part_bb);
7091 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
7092 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
7093 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
7094 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7095 false, GSI_CONTINUE_LINKING);
7097 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
7098 t = fold_build2 (MIN_EXPR, itype, t, n);
7099 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7100 false, GSI_CONTINUE_LINKING);
7102 t = build2 (LT_EXPR, boolean_type_node, s0, n);
7103 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
7105 /* Setup code for sequential iteration goes in SEQ_START_BB. */
7106 gsi = gsi_start_bb (seq_start_bb);
7108 tree startvar = fd->loop.v;
7109 tree endvar = NULL_TREE;
7111 if (gimple_omp_for_combined_p (fd->for_stmt))
7113 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
7114 ? gimple_omp_parallel_clauses (inner_stmt)
7115 : gimple_omp_for_clauses (inner_stmt);
7116 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
7117 gcc_assert (innerc);
7118 startvar = OMP_CLAUSE_DECL (innerc);
7119 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7120 OMP_CLAUSE__LOOPTEMP_);
7121 gcc_assert (innerc);
7122 endvar = OMP_CLAUSE_DECL (innerc);
7125 t = fold_convert (itype, s0);
7126 t = fold_build2 (MULT_EXPR, itype, t, step);
7127 if (POINTER_TYPE_P (type))
7128 t = fold_build_pointer_plus (n1, t);
7129 else
7130 t = fold_build2 (PLUS_EXPR, type, t, n1);
7131 t = fold_convert (TREE_TYPE (startvar), t);
7132 t = force_gimple_operand_gsi (&gsi, t,
7133 DECL_P (startvar)
7134 && TREE_ADDRESSABLE (startvar),
7135 NULL_TREE, false, GSI_CONTINUE_LINKING);
7136 assign_stmt = gimple_build_assign (startvar, t);
7137 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7139 t = fold_convert (itype, e0);
7140 t = fold_build2 (MULT_EXPR, itype, t, step);
7141 if (POINTER_TYPE_P (type))
7142 t = fold_build_pointer_plus (n1, t);
7143 else
7144 t = fold_build2 (PLUS_EXPR, type, t, n1);
7145 t = fold_convert (TREE_TYPE (startvar), t);
7146 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7147 false, GSI_CONTINUE_LINKING);
7148 if (endvar)
7150 assign_stmt = gimple_build_assign (endvar, e);
7151 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7152 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
7153 assign_stmt = gimple_build_assign (fd->loop.v, e);
7154 else
7155 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
7156 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7158 if (fd->collapse > 1)
7159 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
7161 if (!broken_loop)
7163 /* The code controlling the sequential loop goes in CONT_BB,
7164 replacing the GIMPLE_OMP_CONTINUE. */
7165 gsi = gsi_last_bb (cont_bb);
7166 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
7167 vmain = gimple_omp_continue_control_use (cont_stmt);
7168 vback = gimple_omp_continue_control_def (cont_stmt);
7170 if (!gimple_omp_for_combined_p (fd->for_stmt))
7172 if (POINTER_TYPE_P (type))
7173 t = fold_build_pointer_plus (vmain, step);
7174 else
7175 t = fold_build2 (PLUS_EXPR, type, vmain, step);
7176 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
7177 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7178 true, GSI_SAME_STMT);
7179 assign_stmt = gimple_build_assign (vback, t);
7180 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
7182 t = build2 (fd->loop.cond_code, boolean_type_node,
7183 DECL_P (vback) && TREE_ADDRESSABLE (vback)
7184 ? t : vback, e);
7185 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
7188 /* Remove GIMPLE_OMP_CONTINUE. */
7189 gsi_remove (&gsi, true);
7191 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
7192 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
7194 /* Trip update code goes into TRIP_UPDATE_BB. */
7195 gsi = gsi_start_bb (trip_update_bb);
7197 t = build_int_cst (itype, 1);
7198 t = build2 (PLUS_EXPR, itype, trip_main, t);
7199 assign_stmt = gimple_build_assign (trip_back, t);
7200 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7203 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
7204 gsi = gsi_last_bb (exit_bb);
7205 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
7207 t = gimple_omp_return_lhs (gsi_stmt (gsi));
7208 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
7209 gcc_checking_assert (t == NULL_TREE);
7210 else
7211 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
7213 gsi_remove (&gsi, true);
7215 /* Connect the new blocks. */
7216 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
7217 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
7219 if (!broken_loop)
7221 se = find_edge (cont_bb, body_bb);
7222 if (gimple_omp_for_combined_p (fd->for_stmt))
7224 remove_edge (se);
7225 se = NULL;
7227 else if (fd->collapse > 1)
7229 remove_edge (se);
7230 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
7232 else
7233 se->flags = EDGE_TRUE_VALUE;
7234 find_edge (cont_bb, trip_update_bb)->flags
7235 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
7237 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
7240 if (gimple_in_ssa_p (cfun))
7242 gphi_iterator psi;
7243 gphi *phi;
7244 edge re, ene;
7245 edge_var_map *vm;
7246 size_t i;
7248 gcc_assert (fd->collapse == 1 && !broken_loop);
7250 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
7251 remove arguments of the phi nodes in fin_bb. We need to create
7252 appropriate phi nodes in iter_part_bb instead. */
7253 se = single_pred_edge (fin_bb);
7254 re = single_succ_edge (trip_update_bb);
7255 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
7256 ene = single_succ_edge (entry_bb);
7258 psi = gsi_start_phis (fin_bb);
7259 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
7260 gsi_next (&psi), ++i)
7262 gphi *nphi;
7263 source_location locus;
7265 phi = psi.phi ();
7266 t = gimple_phi_result (phi);
7267 gcc_assert (t == redirect_edge_var_map_result (vm));
7268 nphi = create_phi_node (t, iter_part_bb);
7270 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
7271 locus = gimple_phi_arg_location_from_edge (phi, se);
7273 /* A special case -- fd->loop.v is not yet computed in
7274 iter_part_bb, so we need to use vextra instead. */
7275 if (t == fd->loop.v)
7276 t = vextra;
7277 add_phi_arg (nphi, t, ene, locus);
7278 locus = redirect_edge_var_map_location (vm);
7279 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
7281 gcc_assert (gsi_end_p (psi) && i == head->length ());
7282 redirect_edge_var_map_clear (re);
7283 while (1)
7285 psi = gsi_start_phis (fin_bb);
7286 if (gsi_end_p (psi))
7287 break;
7288 remove_phi_node (&psi, false);
7291 /* Make phi node for trip. */
7292 phi = create_phi_node (trip_main, iter_part_bb);
7293 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
7294 UNKNOWN_LOCATION);
7295 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
7296 UNKNOWN_LOCATION);
7299 if (!broken_loop)
7300 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
7301 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
7302 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
7303 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
7304 recompute_dominator (CDI_DOMINATORS, fin_bb));
7305 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
7306 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
7307 set_immediate_dominator (CDI_DOMINATORS, body_bb,
7308 recompute_dominator (CDI_DOMINATORS, body_bb));
7310 if (!broken_loop)
7312 struct loop *trip_loop = alloc_loop ();
7313 trip_loop->header = iter_part_bb;
7314 trip_loop->latch = trip_update_bb;
7315 add_loop (trip_loop, iter_part_bb->loop_father);
7317 if (!gimple_omp_for_combined_p (fd->for_stmt))
7319 struct loop *loop = alloc_loop ();
7320 loop->header = body_bb;
7321 if (collapse_bb == NULL)
7322 loop->latch = cont_bb;
7323 add_loop (loop, trip_loop);
7328 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
7329 Given parameters:
7330 for (V = N1; V cond N2; V += STEP) BODY;
7332 where COND is "<" or ">" or "!=", we generate pseudocode
7334 for (ind_var = low; ind_var < high; ind_var++)
7336 V = n1 + (ind_var * STEP)
7338 <BODY>
7341 In the above pseudocode, low and high are function parameters of the
7342 child function. In the function below, we insert a temporary
7343 variable that makes calls to two OMP functions that will not be
7344 found in the body of _Cilk_for (since OMP_FOR cannot be mixed
7345 with _Cilk_for). These calls are replaced with low and high
7346 by the function that handles taskreg. */
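/* For orientation, a hypothetical sketch of the outlined child function
   this comment refers to: the Cilk runtime invokes it with the
   [__low, __high) slice of the flattened iteration space, and the
   placeholder uses inserted below are rewritten into these two
   parameters by the taskreg handling.  */
#if 0
static void
example_cilk_for_body (void *data, unsigned long long __low,
		       unsigned long long __high)
{
  for (unsigned long long ind_var = __low; ind_var < __high; ind_var++)
    {
      /* V = N1 + ind_var * STEP;
	 BODY;  */
    }
}
#endif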
7349 static void
7350 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
7352 bool broken_loop = region->cont == NULL;
7353 basic_block entry_bb = region->entry;
7354 basic_block cont_bb = region->cont;
7356 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7357 gcc_assert (broken_loop
7358 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7359 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7360 basic_block l1_bb, l2_bb;
7362 if (!broken_loop)
7364 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7365 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7366 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7367 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7369 else
7371 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7372 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7373 l2_bb = single_succ (l1_bb);
7375 basic_block exit_bb = region->exit;
7376 basic_block l2_dom_bb = NULL;
7378 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
7380 /* The statements below, up to the "tree high_val = ..." one, are pseudo
7381 statements used to pass information to expand_omp_taskreg.
7382 low_val and high_val will be replaced by the __low and __high
7383 parameters of the child function.
7385 The call_exprs part is a place-holder; it is mainly used
7386 to identify distinctly to the top-level part that this is
7387 where we should put low and high (reasoning given in the header
7388 comment). */
7390 tree child_fndecl
7391 = gimple_omp_parallel_child_fn (
7392 as_a <gomp_parallel *> (last_stmt (region->outer->entry)));
7393 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
7394 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
7396 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
7397 high_val = t;
7398 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
7399 low_val = t;
7401 gcc_assert (low_val && high_val);
7403 tree type = TREE_TYPE (low_val);
7404 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
7405 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7407 /* Not needed in SSA form right now. */
7408 gcc_assert (!gimple_in_ssa_p (cfun));
7409 if (l2_dom_bb == NULL)
7410 l2_dom_bb = l1_bb;
7412 tree n1 = low_val;
7413 tree n2 = high_val;
7415 gimple stmt = gimple_build_assign (ind_var, n1);
7417 /* Replace the GIMPLE_OMP_FOR statement. */
7418 gsi_replace (&gsi, stmt, true);
7420 if (!broken_loop)
7422 /* Code to control the increment goes in the CONT_BB. */
7423 gsi = gsi_last_bb (cont_bb);
7424 stmt = gsi_stmt (gsi);
7425 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7426 stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var,
7427 build_one_cst (type));
7429 /* Replace GIMPLE_OMP_CONTINUE. */
7430 gsi_replace (&gsi, stmt, true);
7433 /* Emit the condition in L1_BB. */
7434 gsi = gsi_after_labels (l1_bb);
7435 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
7436 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
7437 fd->loop.step);
7438 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
7439 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
7440 fd->loop.n1, fold_convert (sizetype, t));
7441 else
7442 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
7443 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
7444 t = fold_convert (TREE_TYPE (fd->loop.v), t);
7445 expand_omp_build_assign (&gsi, fd->loop.v, t);
7447 /* The condition is always '<' since the runtime will fill in the low
7448 and high values. */
7449 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
7450 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
7452 /* Remove GIMPLE_OMP_RETURN. */
7453 gsi = gsi_last_bb (exit_bb);
7454 gsi_remove (&gsi, true);
7456 /* Connect the new blocks. */
7457 remove_edge (FALLTHRU_EDGE (entry_bb));
7459 edge e, ne;
7460 if (!broken_loop)
7462 remove_edge (BRANCH_EDGE (entry_bb));
7463 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7465 e = BRANCH_EDGE (l1_bb);
7466 ne = FALLTHRU_EDGE (l1_bb);
7467 e->flags = EDGE_TRUE_VALUE;
7469 else
7471 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7473 ne = single_succ_edge (l1_bb);
7474 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7477 ne->flags = EDGE_FALSE_VALUE;
7478 e->probability = REG_BR_PROB_BASE * 7 / 8;
7479 ne->probability = REG_BR_PROB_BASE / 8;
7481 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7482 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7483 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7485 if (!broken_loop)
7487 struct loop *loop = alloc_loop ();
7488 loop->header = l1_bb;
7489 loop->latch = cont_bb;
7490 add_loop (loop, l1_bb->loop_father);
7491 loop->safelen = INT_MAX;
7494 /* Pick the correct library function based on the precision of the
7495 induction variable type. */
7496 tree lib_fun = NULL_TREE;
7497 if (TYPE_PRECISION (type) == 32)
7498 lib_fun = cilk_for_32_fndecl;
7499 else if (TYPE_PRECISION (type) == 64)
7500 lib_fun = cilk_for_64_fndecl;
7501 else
7502 gcc_unreachable ();
7504 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
7506 /* WS_ARGS contains the library function flavor to call
7507 (__libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32) and the
7508 user-defined grain value. If the user does not define one, then zero
7509 is passed in by the parser. */
7510 vec_alloc (region->ws_args, 2);
7511 region->ws_args->quick_push (lib_fun);
7512 region->ws_args->quick_push (fd->chunk_size);
7515 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
7516 loop. Given parameters:
7518 for (V = N1; V cond N2; V += STEP) BODY;
7520 where COND is "<" or ">", we generate pseudocode
7522 V = N1;
7523 goto L1;
7525 BODY;
7526 V += STEP;
7528 if (V cond N2) goto L0; else goto L2;
7531 For collapsed loops, given parameters:
7532 collapse(3)
7533 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
7534 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
7535 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
7536 BODY;
7538 we generate pseudocode
7540 if (cond3 is <)
7541 adj = STEP3 - 1;
7542 else
7543 adj = STEP3 + 1;
7544 count3 = (adj + N32 - N31) / STEP3;
7545 if (cond2 is <)
7546 adj = STEP2 - 1;
7547 else
7548 adj = STEP2 + 1;
7549 count2 = (adj + N22 - N21) / STEP2;
7550 if (cond1 is <)
7551 adj = STEP1 - 1;
7552 else
7553 adj = STEP1 + 1;
7554 count1 = (adj + N12 - N11) / STEP1;
7555 count = count1 * count2 * count3;
7556 V = 0;
7557 V1 = N11;
7558 V2 = N21;
7559 V3 = N31;
7560 goto L1;
7562 BODY;
7563 V += 1;
7564 V3 += STEP3;
7565 V2 += (V3 cond3 N32) ? 0 : STEP2;
7566 V3 = (V3 cond3 N32) ? V3 : N31;
7567 V1 += (V2 cond2 N22) ? 0 : STEP1;
7568 V2 = (V2 cond2 N22) ? V2 : N21;
7570 if (V < count) goto L0; else goto L2;
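/* A worked instance of the count computation above (the numbers are
   illustrative assumptions, not from the sources): for a single loop
   with N1 = 0, N2 = 10, STEP = 3 and cond "<", adj = STEP - 1 = 2, so
   count = (2 + 10 - 0) / 3 = 4, matching the iterations 0, 3, 6, 9.  */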
7575 static void
7576 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
7578 tree type, t;
7579 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7580 gimple_stmt_iterator gsi;
7581 gimple stmt;
7582 gcond *cond_stmt;
7583 bool broken_loop = region->cont == NULL;
7584 edge e, ne;
7585 tree *counts = NULL;
7586 int i;
7587 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7588 OMP_CLAUSE_SAFELEN);
7589 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7590 OMP_CLAUSE__SIMDUID_);
7591 tree n1, n2;
7593 type = TREE_TYPE (fd->loop.v);
7594 entry_bb = region->entry;
7595 cont_bb = region->cont;
7596 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7597 gcc_assert (broken_loop
7598 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7599 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7600 if (!broken_loop)
7602 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7603 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7604 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7605 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7607 else
7609 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7610 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7611 l2_bb = single_succ (l1_bb);
7613 exit_bb = region->exit;
7614 l2_dom_bb = NULL;
7616 gsi = gsi_last_bb (entry_bb);
7618 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7619 /* Not needed in SSA form right now. */
7620 gcc_assert (!gimple_in_ssa_p (cfun));
7621 if (fd->collapse > 1)
7623 int first_zero_iter = -1;
7624 basic_block zero_iter_bb = l2_bb;
7626 counts = XALLOCAVEC (tree, fd->collapse);
7627 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7628 zero_iter_bb, first_zero_iter,
7629 l2_dom_bb);
7631 if (l2_dom_bb == NULL)
7632 l2_dom_bb = l1_bb;
7634 n1 = fd->loop.n1;
7635 n2 = fd->loop.n2;
7636 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7638 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7639 OMP_CLAUSE__LOOPTEMP_);
7640 gcc_assert (innerc);
7641 n1 = OMP_CLAUSE_DECL (innerc);
7642 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7643 OMP_CLAUSE__LOOPTEMP_);
7644 gcc_assert (innerc);
7645 n2 = OMP_CLAUSE_DECL (innerc);
7646 expand_omp_build_assign (&gsi, fd->loop.v,
7647 fold_convert (type, n1));
7648 if (fd->collapse > 1)
7650 gsi_prev (&gsi);
7651 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7652 gsi_next (&gsi);
7655 else
7657 expand_omp_build_assign (&gsi, fd->loop.v,
7658 fold_convert (type, fd->loop.n1));
7659 if (fd->collapse > 1)
7660 for (i = 0; i < fd->collapse; i++)
7662 tree itype = TREE_TYPE (fd->loops[i].v);
7663 if (POINTER_TYPE_P (itype))
7664 itype = signed_type_for (itype);
7665 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7666 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7670 /* Remove the GIMPLE_OMP_FOR statement. */
7671 gsi_remove (&gsi, true);
7673 if (!broken_loop)
7675 /* Code to control the increment goes in the CONT_BB. */
7676 gsi = gsi_last_bb (cont_bb);
7677 stmt = gsi_stmt (gsi);
7678 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7680 if (POINTER_TYPE_P (type))
7681 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7682 else
7683 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7684 expand_omp_build_assign (&gsi, fd->loop.v, t);
7686 if (fd->collapse > 1)
7688 i = fd->collapse - 1;
7689 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7691 t = fold_convert (sizetype, fd->loops[i].step);
7692 t = fold_build_pointer_plus (fd->loops[i].v, t);
7694 else
7696 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7697 fd->loops[i].step);
7698 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7699 fd->loops[i].v, t);
7701 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7703 for (i = fd->collapse - 1; i > 0; i--)
7705 tree itype = TREE_TYPE (fd->loops[i].v);
7706 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7707 if (POINTER_TYPE_P (itype2))
7708 itype2 = signed_type_for (itype2);
7709 t = build3 (COND_EXPR, itype2,
7710 build2 (fd->loops[i].cond_code, boolean_type_node,
7711 fd->loops[i].v,
7712 fold_convert (itype, fd->loops[i].n2)),
7713 build_int_cst (itype2, 0),
7714 fold_convert (itype2, fd->loops[i - 1].step));
7715 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7716 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7717 else
7718 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7719 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7721 t = build3 (COND_EXPR, itype,
7722 build2 (fd->loops[i].cond_code, boolean_type_node,
7723 fd->loops[i].v,
7724 fold_convert (itype, fd->loops[i].n2)),
7725 fd->loops[i].v,
7726 fold_convert (itype, fd->loops[i].n1));
7727 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7731 /* Remove GIMPLE_OMP_CONTINUE. */
7732 gsi_remove (&gsi, true);
7735 /* Emit the condition in L1_BB. */
7736 gsi = gsi_start_bb (l1_bb);
7738 t = fold_convert (type, n2);
7739 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7740 false, GSI_CONTINUE_LINKING);
7741 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7742 cond_stmt = gimple_build_cond_empty (t);
7743 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
7744 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
7745 NULL, NULL)
7746 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
7747 NULL, NULL))
7749 gsi = gsi_for_stmt (cond_stmt);
7750 gimple_regimplify_operands (cond_stmt, &gsi);
7753 /* Remove GIMPLE_OMP_RETURN. */
7754 gsi = gsi_last_bb (exit_bb);
7755 gsi_remove (&gsi, true);
7757 /* Connect the new blocks. */
7758 remove_edge (FALLTHRU_EDGE (entry_bb));
7760 if (!broken_loop)
7762 remove_edge (BRANCH_EDGE (entry_bb));
7763 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7765 e = BRANCH_EDGE (l1_bb);
7766 ne = FALLTHRU_EDGE (l1_bb);
7767 e->flags = EDGE_TRUE_VALUE;
7769 else
7771 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7773 ne = single_succ_edge (l1_bb);
7774 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7777 ne->flags = EDGE_FALSE_VALUE;
7778 e->probability = REG_BR_PROB_BASE * 7 / 8;
7779 ne->probability = REG_BR_PROB_BASE / 8;
7781 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7782 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7783 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7785 if (!broken_loop)
7787 struct loop *loop = alloc_loop ();
7788 loop->header = l1_bb;
7789 loop->latch = cont_bb;
7790 add_loop (loop, l1_bb->loop_father);
7791 if (safelen == NULL_TREE)
7792 loop->safelen = INT_MAX;
7793 else
7795 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7796 if (TREE_CODE (safelen) != INTEGER_CST)
7797 loop->safelen = 0;
7798 else if (!tree_fits_uhwi_p (safelen)
7799 || tree_to_uhwi (safelen) > INT_MAX)
7800 loop->safelen = INT_MAX;
7801 else
7802 loop->safelen = tree_to_uhwi (safelen);
7803 if (loop->safelen == 1)
7804 loop->safelen = 0;
7806 if (simduid)
7808 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7809 cfun->has_simduid_loops = true;
7811 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7812 the loop. */
7813 if ((flag_tree_loop_vectorize
7814 || (!global_options_set.x_flag_tree_loop_vectorize
7815 && !global_options_set.x_flag_tree_vectorize))
7816 && flag_tree_loop_optimize
7817 && loop->safelen > 1)
7819 loop->force_vectorize = true;
7820 cfun->has_force_vectorize_loops = true;
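/* As a hedged example of the safelen/simduid handling above: a loop
   annotated "#pragma omp simd safelen(8)" ends up with loop->safelen == 8,
   a loop without the clause gets INT_MAX, and safelen(1) is canonicalized
   to 0 since such a loop offers no SIMD parallelism to exploit.  */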
7826 /* Expand the OMP loop defined by REGION. */
7828 static void
7829 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7831 struct omp_for_data fd;
7832 struct omp_for_data_loop *loops;
7834 loops
7835 = (struct omp_for_data_loop *)
7836 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7837 * sizeof (struct omp_for_data_loop));
7838 extract_omp_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
7839 &fd, loops);
7840 region->sched_kind = fd.sched_kind;
7842 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7843 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7844 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7845 if (region->cont)
7847 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7848 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7849 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7851 else
7852 /* If there isn't a continue then this is a degenerate case where
7853 the introduction of abnormal edges during lowering will prevent
7854 original loops from being detected. Fix that up. */
7855 loops_state_set (LOOPS_NEED_FIXUP);
7857 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7858 expand_omp_simd (region, &fd);
7859 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7860 expand_cilk_for (region, &fd);
7861 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7862 && !fd.have_ordered)
7864 if (fd.chunk_size == NULL)
7865 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7866 else
7867 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7869 else
7871 int fn_index, start_ix, next_ix;
7873 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7874 == GF_OMP_FOR_KIND_FOR);
7875 if (fd.chunk_size == NULL
7876 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7877 fd.chunk_size = integer_zero_node;
7878 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
7879 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7880 ? 3 : fd.sched_kind;
7881 fn_index += fd.have_ordered * 4;
7882 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7883 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
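/* Worked example (assuming the consecutive layout of the GOMP_LOOP_*
   builtins that this arithmetic relies on): schedule(dynamic) gives
   fn_index 1; adding an ordered clause makes it 1 + 4 = 5, selecting
   BUILT_IN_GOMP_LOOP_ORDERED_DYNAMIC_START and its matching _NEXT
   entry point.  */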
7884 if (fd.iter_type == long_long_unsigned_type_node)
7886 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7887 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7888 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7889 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7891 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7892 (enum built_in_function) next_ix, inner_stmt);
7895 if (gimple_in_ssa_p (cfun))
7896 update_ssa (TODO_update_ssa_only_virtuals);
7900 /* Expand code for an OpenMP sections directive. In pseudocode, we generate
7902 v = GOMP_sections_start (n);
7904 switch (v)
7906 case 0:
7907 goto L2;
7908 case 1:
7909 section 1;
7910 goto L1;
7911 case 2:
7913 case n:
7915 default:
7916 abort ();
7919 v = GOMP_sections_next ();
7920 goto L0;
7922 reduction;
7924 If this is a combined parallel sections, replace the call to
7925 GOMP_sections_start with call to GOMP_sections_next. */
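/* For example (an assumed translation following the pseudocode above):

     #pragma omp sections
     {
       #pragma omp section
       a ();
       #pragma omp section
       b ();
     }

   becomes a switch whose case 1 runs a (), case 2 runs b (), case 0
   leaves the construct, and whose default case calls abort ().  */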
7927 static void
7928 expand_omp_sections (struct omp_region *region)
7930 tree t, u, vin = NULL, vmain, vnext, l2;
7931 unsigned len;
7932 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7933 gimple_stmt_iterator si, switch_si;
7934 gomp_sections *sections_stmt;
7935 gimple stmt;
7936 gomp_continue *cont;
7937 edge_iterator ei;
7938 edge e;
7939 struct omp_region *inner;
7940 unsigned i, casei;
7941 bool exit_reachable = region->cont != NULL;
7943 gcc_assert (region->exit != NULL);
7944 entry_bb = region->entry;
7945 l0_bb = single_succ (entry_bb);
7946 l1_bb = region->cont;
7947 l2_bb = region->exit;
7948 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7949 l2 = gimple_block_label (l2_bb);
7950 else
7952 /* This can happen if there are reductions. */
7953 len = EDGE_COUNT (l0_bb->succs);
7954 gcc_assert (len > 0);
7955 e = EDGE_SUCC (l0_bb, len - 1);
7956 si = gsi_last_bb (e->dest);
7957 l2 = NULL_TREE;
7958 if (gsi_end_p (si)
7959 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7960 l2 = gimple_block_label (e->dest);
7961 else
7962 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7964 si = gsi_last_bb (e->dest);
7965 if (gsi_end_p (si)
7966 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7968 l2 = gimple_block_label (e->dest);
7969 break;
7973 if (exit_reachable)
7974 default_bb = create_empty_bb (l1_bb->prev_bb);
7975 else
7976 default_bb = create_empty_bb (l0_bb);
7978 /* We will build a switch() with enough cases for all the
7979 GIMPLE_OMP_SECTION regions, a '0' case taken when no more work remains,
7980 and a default case to abort if something goes wrong. */
7981 len = EDGE_COUNT (l0_bb->succs);
7983 /* Use vec::quick_push on label_vec throughout, since we know the size
7984 in advance. */
7985 auto_vec<tree> label_vec (len);
7987 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7988 GIMPLE_OMP_SECTIONS statement. */
7989 si = gsi_last_bb (entry_bb);
7990 sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
7991 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7992 vin = gimple_omp_sections_control (sections_stmt);
7993 if (!is_combined_parallel (region))
7995 /* If we are not inside a combined parallel+sections region,
7996 call GOMP_sections_start. */
7997 t = build_int_cst (unsigned_type_node, len - 1);
7998 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7999 stmt = gimple_build_call (u, 1, t);
8001 else
8003 /* Otherwise, call GOMP_sections_next. */
8004 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
8005 stmt = gimple_build_call (u, 0);
8007 gimple_call_set_lhs (stmt, vin);
8008 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
8009 gsi_remove (&si, true);
8011 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
8012 L0_BB. */
8013 switch_si = gsi_last_bb (l0_bb);
8014 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
8015 if (exit_reachable)
8017 cont = as_a <gomp_continue *> (last_stmt (l1_bb));
8018 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
8019 vmain = gimple_omp_continue_control_use (cont);
8020 vnext = gimple_omp_continue_control_def (cont);
8022 else
8024 vmain = vin;
8025 vnext = NULL_TREE;
8028 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
8029 label_vec.quick_push (t);
8030 i = 1;
8032 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
8033 for (inner = region->inner, casei = 1;
8034 inner;
8035 inner = inner->next, i++, casei++)
8037 basic_block s_entry_bb, s_exit_bb;
8039 /* Skip optional reduction region. */
8040 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
8042 --i;
8043 --casei;
8044 continue;
8047 s_entry_bb = inner->entry;
8048 s_exit_bb = inner->exit;
8050 t = gimple_block_label (s_entry_bb);
8051 u = build_int_cst (unsigned_type_node, casei);
8052 u = build_case_label (u, NULL, t);
8053 label_vec.quick_push (u);
8055 si = gsi_last_bb (s_entry_bb);
8056 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
8057 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
8058 gsi_remove (&si, true);
8059 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
8061 if (s_exit_bb == NULL)
8062 continue;
8064 si = gsi_last_bb (s_exit_bb);
8065 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
8066 gsi_remove (&si, true);
8068 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
8071 /* Error handling code goes in DEFAULT_BB. */
8072 t = gimple_block_label (default_bb);
8073 u = build_case_label (NULL, NULL, t);
8074 make_edge (l0_bb, default_bb, 0);
8075 add_bb_to_loop (default_bb, current_loops->tree_root);
8077 stmt = gimple_build_switch (vmain, u, label_vec);
8078 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
8079 gsi_remove (&switch_si, true);
8081 si = gsi_start_bb (default_bb);
8082 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
8083 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
8085 if (exit_reachable)
8087 tree bfn_decl;
8089 /* Code to get the next section goes in L1_BB. */
8090 si = gsi_last_bb (l1_bb);
8091 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
8093 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
8094 stmt = gimple_build_call (bfn_decl, 0);
8095 gimple_call_set_lhs (stmt, vnext);
8096 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
8097 gsi_remove (&si, true);
8099 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
8102 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
8103 si = gsi_last_bb (l2_bb);
8104 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
8105 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
8106 else if (gimple_omp_return_lhs (gsi_stmt (si)))
8107 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
8108 else
8109 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
8110 stmt = gimple_build_call (t, 0);
8111 if (gimple_omp_return_lhs (gsi_stmt (si)))
8112 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
8113 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
8114 gsi_remove (&si, true);
8116 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
8120 /* Expand code for an OpenMP single directive. We've already expanded
8121 much of the code; here we simply place the GOMP_barrier call. */
8123 static void
8124 expand_omp_single (struct omp_region *region)
8126 basic_block entry_bb, exit_bb;
8127 gimple_stmt_iterator si;
8129 entry_bb = region->entry;
8130 exit_bb = region->exit;
8132 si = gsi_last_bb (entry_bb);
8133 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
8134 gsi_remove (&si, true);
8135 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8137 si = gsi_last_bb (exit_bb);
8138 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
8140 tree t = gimple_omp_return_lhs (gsi_stmt (si));
8141 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
8143 gsi_remove (&si, true);
8144 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
8148 /* Generic expansion for OpenMP synchronization directives: master,
8149 taskgroup, ordered, critical and teams. All we need to do here is
8150 remove the entry and exit markers for REGION. */
8152 static void
8153 expand_omp_synch (struct omp_region *region)
8155 basic_block entry_bb, exit_bb;
8156 gimple_stmt_iterator si;
8158 entry_bb = region->entry;
8159 exit_bb = region->exit;
8161 si = gsi_last_bb (entry_bb);
8162 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
8163 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
8164 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
8165 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
8166 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
8167 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
8168 gsi_remove (&si, true);
8169 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8171 if (exit_bb)
8173 si = gsi_last_bb (exit_bb);
8174 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
8175 gsi_remove (&si, true);
8176 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
8180 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8181 operation as a normal volatile load. */
8183 static bool
8184 expand_omp_atomic_load (basic_block load_bb, tree addr,
8185 tree loaded_val, int index)
8187 enum built_in_function tmpbase;
8188 gimple_stmt_iterator gsi;
8189 basic_block store_bb;
8190 location_t loc;
8191 gimple stmt;
8192 tree decl, call, type, itype;
8194 gsi = gsi_last_bb (load_bb);
8195 stmt = gsi_stmt (gsi);
8196 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
8197 loc = gimple_location (stmt);
8199 /* ??? If the target does not implement atomic_load_optab[mode], and mode
8200 is smaller than word size, then expand_atomic_load assumes that the load
8201 is atomic. We could avoid the builtin entirely in this case. */
8203 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
8204 decl = builtin_decl_explicit (tmpbase);
8205 if (decl == NULL_TREE)
8206 return false;
8208 type = TREE_TYPE (loaded_val);
8209 itype = TREE_TYPE (TREE_TYPE (decl));
8211 call = build_call_expr_loc (loc, decl, 2, addr,
8212 build_int_cst (NULL,
8213 gimple_omp_atomic_seq_cst_p (stmt)
8214 ? MEMMODEL_SEQ_CST
8215 : MEMMODEL_RELAXED));
8216 if (!useless_type_conversion_p (type, itype))
8217 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
8218 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
8220 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
8221 gsi_remove (&gsi, true);
8223 store_bb = single_succ (load_bb);
8224 gsi = gsi_last_bb (store_bb);
8225 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
8226 gsi_remove (&gsi, true);
8228 if (gimple_in_ssa_p (cfun))
8229 update_ssa (TODO_update_ssa_no_phi);
8231 return true;
8234 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8235 operation as a normal volatile store. */
8237 static bool
8238 expand_omp_atomic_store (basic_block load_bb, tree addr,
8239 tree loaded_val, tree stored_val, int index)
8241 enum built_in_function tmpbase;
8242 gimple_stmt_iterator gsi;
8243 basic_block store_bb = single_succ (load_bb);
8244 location_t loc;
8245 gimple stmt;
8246 tree decl, call, type, itype;
8247 machine_mode imode;
8248 bool exchange;
8250 gsi = gsi_last_bb (load_bb);
8251 stmt = gsi_stmt (gsi);
8252 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
8254 /* If the load value is needed, then this isn't a store but an exchange. */
8255 exchange = gimple_omp_atomic_need_value_p (stmt);
8257 gsi = gsi_last_bb (store_bb);
8258 stmt = gsi_stmt (gsi);
8259 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
8260 loc = gimple_location (stmt);
8262 /* ??? If the target does not implement atomic_store_optab[mode], and mode
8263 is smaller than word size, then expand_atomic_store assumes that the store
8264 is atomic. We could avoid the builtin entirely in this case. */
8266 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
8267 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
8268 decl = builtin_decl_explicit (tmpbase);
8269 if (decl == NULL_TREE)
8270 return false;
8272 type = TREE_TYPE (stored_val);
8274 /* Dig out the type of the function's second argument. */
8275 itype = TREE_TYPE (decl);
8276 itype = TYPE_ARG_TYPES (itype);
8277 itype = TREE_CHAIN (itype);
8278 itype = TREE_VALUE (itype);
8279 imode = TYPE_MODE (itype);
8281 if (exchange && !can_atomic_exchange_p (imode, true))
8282 return false;
8284 if (!useless_type_conversion_p (itype, type))
8285 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
8286 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
8287 build_int_cst (NULL,
8288 gimple_omp_atomic_seq_cst_p (stmt)
8289 ? MEMMODEL_SEQ_CST
8290 : MEMMODEL_RELAXED));
8291 if (exchange)
8293 if (!useless_type_conversion_p (type, itype))
8294 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
8295 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
8298 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
8299 gsi_remove (&gsi, true);
8301 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
8302 gsi = gsi_last_bb (load_bb);
8303 gsi_remove (&gsi, true);
8305 if (gimple_in_ssa_p (cfun))
8306 update_ssa (TODO_update_ssa_no_phi);
8308 return true;
8311 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8312 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
8313 size of the data type, and thus usable to find the index of the builtin
8314 decl. Returns false if the expression is not of the proper form. */
8316 static bool
8317 expand_omp_atomic_fetch_op (basic_block load_bb,
8318 tree addr, tree loaded_val,
8319 tree stored_val, int index)
8321 enum built_in_function oldbase, newbase, tmpbase;
8322 tree decl, itype, call;
8323 tree lhs, rhs;
8324 basic_block store_bb = single_succ (load_bb);
8325 gimple_stmt_iterator gsi;
8326 gimple stmt;
8327 location_t loc;
8328 enum tree_code code;
8329 bool need_old, need_new;
8330 machine_mode imode;
8331 bool seq_cst;
8333 /* We expect to find the following sequences:
8335 load_bb:
8336 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
8338 store_bb:
8339 val = tmp OP something; (or: something OP tmp)
8340 GIMPLE_OMP_ATOMIC_STORE (val)
8342 ???FIXME: Allow a more flexible sequence.
8343 Perhaps use data flow to pick the statements.
8347 gsi = gsi_after_labels (store_bb);
8348 stmt = gsi_stmt (gsi);
8349 loc = gimple_location (stmt);
8350 if (!is_gimple_assign (stmt))
8351 return false;
8352 gsi_next (&gsi);
8353 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
8354 return false;
8355 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
8356 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
8357 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
8358 gcc_checking_assert (!need_old || !need_new);
8360 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
8361 return false;
8363 /* Check for one of the supported fetch-op operations. */
8364 code = gimple_assign_rhs_code (stmt);
8365 switch (code)
8367 case PLUS_EXPR:
8368 case POINTER_PLUS_EXPR:
8369 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
8370 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
8371 break;
8372 case MINUS_EXPR:
8373 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
8374 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
8375 break;
8376 case BIT_AND_EXPR:
8377 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
8378 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
8379 break;
8380 case BIT_IOR_EXPR:
8381 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
8382 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
8383 break;
8384 case BIT_XOR_EXPR:
8385 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
8386 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
8387 break;
8388 default:
8389 return false;
8392 /* Make sure the expression is of the proper form. */
8393 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
8394 rhs = gimple_assign_rhs2 (stmt);
8395 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
8396 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
8397 rhs = gimple_assign_rhs1 (stmt);
8398 else
8399 return false;
8401 tmpbase = ((enum built_in_function)
8402 ((need_new ? newbase : oldbase) + index + 1));
8403 decl = builtin_decl_explicit (tmpbase);
8404 if (decl == NULL_TREE)
8405 return false;
8406 itype = TREE_TYPE (TREE_TYPE (decl));
8407 imode = TYPE_MODE (itype);
8409 /* We could test all of the various optabs involved, but the fact of the
8410 matter is that (with the exception of i486 vs i586 and xadd) all targets
8411 that support any atomic operation optab also implement compare-and-swap.
8412 Let optabs.c take care of expanding any compare-and-swap loop. */
8413 if (!can_compare_and_swap_p (imode, true))
8414 return false;
8416 gsi = gsi_last_bb (load_bb);
8417 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
8419 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
8420 It only requires that the operation happen atomically. Thus we can
8421 use the RELAXED memory model. */
8422 call = build_call_expr_loc (loc, decl, 3, addr,
8423 fold_convert_loc (loc, itype, rhs),
8424 build_int_cst (NULL,
8425 seq_cst ? MEMMODEL_SEQ_CST
8426 : MEMMODEL_RELAXED));
8428 if (need_old || need_new)
8430 lhs = need_old ? loaded_val : stored_val;
8431 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
8432 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
8434 else
8435 call = fold_convert_loc (loc, void_type_node, call);
8436 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
8437 gsi_remove (&gsi, true);
8439 gsi = gsi_last_bb (store_bb);
8440 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
8441 gsi_remove (&gsi, true);
8442 gsi = gsi_last_bb (store_bb);
8443 gsi_remove (&gsi, true);
8445 if (gimple_in_ssa_p (cfun))
8446 update_ssa (TODO_update_ssa_no_phi);
8448 return true;
8451 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8453 oldval = *addr;
8454 repeat:
8455 newval = rhs; // with oldval replacing *addr in rhs
8456 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
8457 if (oldval != newval)
8458 goto repeat;
8460 INDEX is log2 of the size of the data type, and thus usable to find the
8461 index of the builtin decl. */
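/* A hedged note on the floating-point path below: for, say, a 4-byte
   float, the value is view-converted to a same-sized integer so that
   __sync_val_compare_and_swap_4 can be used, and the old/new comparison
   is likewise performed on the integer images.  */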
8463 static bool
8464 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
8465 tree addr, tree loaded_val, tree stored_val,
8466 int index)
8468 tree loadedi, storedi, initial, new_storedi, old_vali;
8469 tree type, itype, cmpxchg, iaddr;
8470 gimple_stmt_iterator si;
8471 basic_block loop_header = single_succ (load_bb);
8472 gimple phi, stmt;
8473 edge e;
8474 enum built_in_function fncode;
8476 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
8477 order to use the RELAXED memory model effectively. */
8478 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
8479 + index + 1);
8480 cmpxchg = builtin_decl_explicit (fncode);
8481 if (cmpxchg == NULL_TREE)
8482 return false;
8483 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8484 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
8486 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
8487 return false;
8489 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
8490 si = gsi_last_bb (load_bb);
8491 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8493 /* For floating-point values, we'll need to view-convert them to integers
8494 so that we can perform the atomic compare and swap. Simplify the
8495 following code by always setting up the "i"ntegral variables. */
8496 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
8498 tree iaddr_val;
8500 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
8501 true));
8502 iaddr_val
8503 = force_gimple_operand_gsi (&si,
8504 fold_convert (TREE_TYPE (iaddr), addr),
8505 false, NULL_TREE, true, GSI_SAME_STMT);
8506 stmt = gimple_build_assign (iaddr, iaddr_val);
8507 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8508 loadedi = create_tmp_var (itype);
8509 if (gimple_in_ssa_p (cfun))
8510 loadedi = make_ssa_name (loadedi);
8512 else
8514 iaddr = addr;
8515 loadedi = loaded_val;
8518 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
8519 tree loaddecl = builtin_decl_explicit (fncode);
8520 if (loaddecl)
8521 initial
8522 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
8523 build_call_expr (loaddecl, 2, iaddr,
8524 build_int_cst (NULL_TREE,
8525 MEMMODEL_RELAXED)));
8526 else
8527 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
8528 build_int_cst (TREE_TYPE (iaddr), 0));
8530 initial
8531 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
8532 GSI_SAME_STMT);
8534 /* Move the value to the LOADEDI temporary. */
8535 if (gimple_in_ssa_p (cfun))
8537 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
8538 phi = create_phi_node (loadedi, loop_header);
8539 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
8540 initial);
8542 else
8543 gsi_insert_before (&si,
8544 gimple_build_assign (loadedi, initial),
8545 GSI_SAME_STMT);
8546 if (loadedi != loaded_val)
8548 gimple_stmt_iterator gsi2;
8549 tree x;
8551 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
8552 gsi2 = gsi_start_bb (loop_header);
8553 if (gimple_in_ssa_p (cfun))
8555 gassign *stmt;
8556 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8557 true, GSI_SAME_STMT);
8558 stmt = gimple_build_assign (loaded_val, x);
8559 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
8561 else
8563 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
8564 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8565 true, GSI_SAME_STMT);
8568 gsi_remove (&si, true);
8570 si = gsi_last_bb (store_bb);
8571 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8573 if (iaddr == addr)
8574 storedi = stored_val;
8575 else
8576 storedi =
8577 force_gimple_operand_gsi (&si,
8578 build1 (VIEW_CONVERT_EXPR, itype,
8579 stored_val), true, NULL_TREE, true,
8580 GSI_SAME_STMT);
8582 /* Build the compare&swap statement. */
8583 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
8584 new_storedi = force_gimple_operand_gsi (&si,
8585 fold_convert (TREE_TYPE (loadedi),
8586 new_storedi),
8587 true, NULL_TREE,
8588 true, GSI_SAME_STMT);
8590 if (gimple_in_ssa_p (cfun))
8591 old_vali = loadedi;
8592 else
8594 old_vali = create_tmp_var (TREE_TYPE (loadedi));
8595 stmt = gimple_build_assign (old_vali, loadedi);
8596 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8598 stmt = gimple_build_assign (loadedi, new_storedi);
8599 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8602 /* Note that we always perform the comparison as an integer, even for
8603 floating point. This allows the atomic operation to properly
8604 succeed even with NaNs and -0.0. */
8605 stmt = gimple_build_cond_empty
8606 (build2 (NE_EXPR, boolean_type_node,
8607 new_storedi, old_vali));
8608 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8610 /* Update cfg. */
8611 e = single_succ_edge (store_bb);
8612 e->flags &= ~EDGE_FALLTHRU;
8613 e->flags |= EDGE_FALSE_VALUE;
8615 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
8617 /* Copy the new value to loadedi (we already did that before the condition
8618 if we are not in SSA). */
8619 if (gimple_in_ssa_p (cfun))
8621 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
8622 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
8625 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8626 gsi_remove (&si, true);
8628 struct loop *loop = alloc_loop ();
8629 loop->header = loop_header;
8630 loop->latch = store_bb;
8631 add_loop (loop, loop_header->loop_father);
8633 if (gimple_in_ssa_p (cfun))
8634 update_ssa (TODO_update_ssa_no_phi);
8636 return true;
8639 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8641 GOMP_atomic_start ();
8642 *addr = rhs;
8643 GOMP_atomic_end ();
8645 The result is not globally atomic, but works so long as all parallel
8646 references are within #pragma omp atomic directives. According to
8647 responses received from omp@openmp.org, this appears to be within spec,
8648 which makes sense, since that's how several other compilers handle
8649 this situation as well.
8650 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8651 expanding. STORED_VAL is the operand of the matching
8652 GIMPLE_OMP_ATOMIC_STORE.
8654 We replace
8655 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8656 loaded_val = *addr;
8658 and replace
8659 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8660 *addr = stored_val;
8663 static bool
8664 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
8665 tree addr, tree loaded_val, tree stored_val)
8667 gimple_stmt_iterator si;
8668 gassign *stmt;
8669 tree t;
8671 si = gsi_last_bb (load_bb);
8672 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8674 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
8675 t = build_call_expr (t, 0);
8676 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8678 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
8679 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8680 gsi_remove (&si, true);
8682 si = gsi_last_bb (store_bb);
8683 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8685 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
8686 stored_val);
8687 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8689 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
8690 t = build_call_expr (t, 0);
8691 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8692 gsi_remove (&si, true);
8694 if (gimple_in_ssa_p (cfun))
8695 update_ssa (TODO_update_ssa_no_phi);
8696 return true;
8699 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
8700 using expand_omp_atomic_fetch_op. If that fails, we try to
8701 call expand_omp_atomic_pipeline, and if that fails too, the
8702 ultimate fallback is wrapping the operation in a mutex
8703 (expand_omp_atomic_mutex). REGION is the atomic region built
8704 by build_omp_regions_1(). */
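/* For instance (an assumed example, consistent with the helpers above):

     #pragma omp atomic
     x += 1;

   with a 4-byte int x is expected to go through
   expand_omp_atomic_fetch_op and become a __atomic_fetch_add_4 call
   with MEMMODEL_RELAXED, falling back to a compare-and-swap loop or to
   the GOMP_atomic_start/GOMP_atomic_end mutex pair where no suitable
   builtin exists.  */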
8706 static void
8707 expand_omp_atomic (struct omp_region *region)
8709 basic_block load_bb = region->entry, store_bb = region->exit;
8710 gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
8711 gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
8712 tree loaded_val = gimple_omp_atomic_load_lhs (load);
8713 tree addr = gimple_omp_atomic_load_rhs (load);
8714 tree stored_val = gimple_omp_atomic_store_val (store);
8715 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8716 HOST_WIDE_INT index;
8718 /* Make sure the type is one of the supported sizes. */
8719 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
8720 index = exact_log2 (index);
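/* Illustrative arithmetic (assuming, as the "+ index + 1" offsets in
   the helpers above do, that the _1, _2, _4, _8 and _16 builtin
   variants follow their _N entry consecutively): a 4-byte type gives
   index 2, so BUILT_IN_ATOMIC_LOAD_N + 2 + 1 selects
   BUILT_IN_ATOMIC_LOAD_4; indices 0..4 thus cover sizes 1..16 bytes.  */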
8721 if (index >= 0 && index <= 4)
8723 unsigned int align = TYPE_ALIGN_UNIT (type);
8725 /* __sync builtins require strict data alignment. */
8726 if (exact_log2 (align) >= index)
8728 /* Atomic load. */
8729 if (loaded_val == stored_val
8730 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8731 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8732 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8733 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
8734 return;
8736 /* Atomic store. */
8737 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8738 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8739 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8740 && store_bb == single_succ (load_bb)
8741 && first_stmt (store_bb) == store
8742 && expand_omp_atomic_store (load_bb, addr, loaded_val,
8743 stored_val, index))
8744 return;
8746 /* When possible, use specialized atomic update functions. */
8747 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
8748 && store_bb == single_succ (load_bb)
8749 && expand_omp_atomic_fetch_op (load_bb, addr,
8750 loaded_val, stored_val, index))
8751 return;
8753 /* If we don't have specialized __sync builtins, try and implement
8754 as a compare and swap loop. */
8755 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
8756 loaded_val, stored_val, index))
8757 return;
8761 /* The ultimate fallback is wrapping the operation in a mutex. */
8762 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
8766 /* Expand the GIMPLE_OMP_TARGET starting at REGION. */
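/* A sketch of the launch call built below (the shape is inferred from
   the argument assembly later in this function and is shown only as an
   illustration): for GF_OMP_TARGET_KIND_REGION it is roughly

     GOMP_target (device, child_fn, NULL, mapnum, hostaddrs, sizes, kinds);

   while data and update regions use GOMP_target_data/GOMP_target_update
   and the OpenACC kinds their GOACC_* counterparts.  */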
8768 static void
8769 expand_omp_target (struct omp_region *region)
8771 basic_block entry_bb, exit_bb, new_bb;
8772 struct function *child_cfun;
8773 tree child_fn, block, t;
8774 gimple_stmt_iterator gsi;
8775 gomp_target *entry_stmt;
8776 gimple stmt;
8777 edge e;
8778 bool offloaded, data_region;
8780 entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
8781 new_bb = region->entry;
8783 offloaded = is_gimple_omp_offloaded (entry_stmt);
8784 switch (gimple_omp_target_kind (entry_stmt))
8786 case GF_OMP_TARGET_KIND_REGION:
8787 case GF_OMP_TARGET_KIND_UPDATE:
8788 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
8789 case GF_OMP_TARGET_KIND_OACC_KERNELS:
8790 case GF_OMP_TARGET_KIND_OACC_UPDATE:
8791 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
8792 data_region = false;
8793 break;
8794 case GF_OMP_TARGET_KIND_DATA:
8795 case GF_OMP_TARGET_KIND_OACC_DATA:
8796 data_region = true;
8797 break;
8798 default:
8799 gcc_unreachable ();
8802 child_fn = NULL_TREE;
8803 child_cfun = NULL;
8804 if (offloaded)
8806 child_fn = gimple_omp_target_child_fn (entry_stmt);
8807 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
8810 /* Supported by expand_omp_taskreg, but not here. */
8811 if (child_cfun != NULL)
8812 gcc_checking_assert (!child_cfun->cfg);
8813 gcc_checking_assert (!gimple_in_ssa_p (cfun));
8815 entry_bb = region->entry;
8816 exit_bb = region->exit;
8818 if (offloaded)
8820 unsigned srcidx, dstidx, num;
8822 /* If the offloading region needs data sent from the parent
8823 function, then the very first statement (except possible
8824 tree profile counter updates) of the offloading body
8825 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8826 &.OMP_DATA_O is passed as an argument to the child function,
8827 we need to replace it with the argument as seen by the child
8828 function.
8830 In most cases, this will end up being the identity assignment
8831 .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had
8832 a function call that has been inlined, the original PARM_DECL
8833 .OMP_DATA_I may have been converted into a different local
8834 variable, in which case we need to keep the assignment. */
8835 tree data_arg = gimple_omp_target_data_arg (entry_stmt);
8836 if (data_arg)
8838 basic_block entry_succ_bb = single_succ (entry_bb);
8839 gimple_stmt_iterator gsi;
8840 tree arg;
8841 gimple tgtcopy_stmt = NULL;
8842 tree sender = TREE_VEC_ELT (data_arg, 0);
8844 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
8846 gcc_assert (!gsi_end_p (gsi));
8847 stmt = gsi_stmt (gsi);
8848 if (gimple_code (stmt) != GIMPLE_ASSIGN)
8849 continue;
8851 if (gimple_num_ops (stmt) == 2)
8853 tree arg = gimple_assign_rhs1 (stmt);
8855 /* We're ignoring the subcode because we're
8856 effectively doing a STRIP_NOPS. */
8858 if (TREE_CODE (arg) == ADDR_EXPR
8859 && TREE_OPERAND (arg, 0) == sender)
8861 tgtcopy_stmt = stmt;
8862 break;
8867 gcc_assert (tgtcopy_stmt != NULL);
8868 arg = DECL_ARGUMENTS (child_fn);
8870 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
8871 gsi_remove (&gsi, true);
8874 /* Declare local variables needed in CHILD_CFUN. */
8875 block = DECL_INITIAL (child_fn);
8876 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
8877 /* The gimplifier could record temporaries in the offloading block
8878 rather than in the containing function's local_decls chain,
8879 which would mean cgraph missed finalizing them. Do it now. */
8880 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
8881 if (TREE_CODE (t) == VAR_DECL
8882 && TREE_STATIC (t)
8883 && !DECL_EXTERNAL (t))
8884 varpool_node::finalize_decl (t);
8885 DECL_SAVED_TREE (child_fn) = NULL;
8886 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8887 gimple_set_body (child_fn, NULL);
8888 TREE_USED (block) = 1;
8890 /* Reset DECL_CONTEXT on function arguments. */
8891 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
8892 DECL_CONTEXT (t) = child_fn;
8894 /* Split ENTRY_BB at the GIMPLE_OMP_TARGET statement,
8895 so that it can be moved to the child function. */
8896 gsi = gsi_last_bb (entry_bb);
8897 stmt = gsi_stmt (gsi);
8898 gcc_assert (stmt
8899 && gimple_code (stmt) == gimple_code (entry_stmt));
8900 e = split_block (entry_bb, stmt);
8901 gsi_remove (&gsi, true);
8902 entry_bb = e->dest;
8903 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8905 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8906 if (exit_bb)
8908 gsi = gsi_last_bb (exit_bb);
8909 gcc_assert (!gsi_end_p (gsi)
8910 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
8911 stmt = gimple_build_return (NULL);
8912 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8913 gsi_remove (&gsi, true);
8916 /* Move the offloading region into CHILD_CFUN. */
8918 block = gimple_block (entry_stmt);
8920 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8921 if (exit_bb)
8922 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8923 /* When the OMP expansion process cannot guarantee an up-to-date
8924 loop tree, arrange for the child function to fix up loops. */
8925 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8926 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8928 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8929 num = vec_safe_length (child_cfun->local_decls);
8930 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8932 t = (*child_cfun->local_decls)[srcidx];
8933 if (DECL_CONTEXT (t) == cfun->decl)
8934 continue;
8935 if (srcidx != dstidx)
8936 (*child_cfun->local_decls)[dstidx] = t;
8937 dstidx++;
8939 if (dstidx != num)
8940 vec_safe_truncate (child_cfun->local_decls, dstidx);
8942 /* Inform the callgraph about the new function. */
8943 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8944 cgraph_node::add_new_function (child_fn, true);
8946 #ifdef ENABLE_OFFLOADING
8947 /* Add the new function to the offload table. */
8948 vec_safe_push (offload_funcs, child_fn);
8949 #endif
8951 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8952 fixed in a following pass. */
8953 push_cfun (child_cfun);
8954 cgraph_edge::rebuild_edges ();
8956 #ifdef ENABLE_OFFLOADING
8957 /* Prevent IPA from removing child_fn as unreachable, since there are no
8958 refs from the parent function to child_fn in offload LTO mode. */
8959 struct cgraph_node *node = cgraph_node::get (child_fn);
8960 node->mark_force_output ();
8961 #endif
8963 /* Some EH regions might become dead, see PR34608. If
8964 pass_cleanup_cfg isn't the first pass to happen with the
8965 new child, these dead EH edges might cause problems.
8966 Clean them up now. */
8967 if (flag_exceptions)
8969 basic_block bb;
8970 bool changed = false;
8972 FOR_EACH_BB_FN (bb, cfun)
8973 changed |= gimple_purge_dead_eh_edges (bb);
8974 if (changed)
8975 cleanup_tree_cfg ();
8977 pop_cfun ();
8980 /* Emit a library call to launch the offloading region, or do data
8981 transfers. */
8982 tree t1, t2, t3, t4, device, cond, c, clauses;
8983 enum built_in_function start_ix;
8984 location_t clause_loc;
8986 switch (gimple_omp_target_kind (entry_stmt))
8988 case GF_OMP_TARGET_KIND_REGION:
8989 start_ix = BUILT_IN_GOMP_TARGET;
8990 break;
8991 case GF_OMP_TARGET_KIND_DATA:
8992 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8993 break;
8994 case GF_OMP_TARGET_KIND_UPDATE:
8995 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8996 break;
8997 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
8998 case GF_OMP_TARGET_KIND_OACC_KERNELS:
8999 start_ix = BUILT_IN_GOACC_PARALLEL;
9000 break;
9001 case GF_OMP_TARGET_KIND_OACC_DATA:
9002 start_ix = BUILT_IN_GOACC_DATA_START;
9003 break;
9004 case GF_OMP_TARGET_KIND_OACC_UPDATE:
9005 start_ix = BUILT_IN_GOACC_UPDATE;
9006 break;
9007 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
9008 start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA;
9009 break;
9010 default:
9011 gcc_unreachable ();
9014 clauses = gimple_omp_target_clauses (entry_stmt);
9016 /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime
9017 library choose) and there is no conditional. */
9018 cond = NULL_TREE;
9019 device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV);
9021 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
9022 if (c)
9023 cond = OMP_CLAUSE_IF_EXPR (c);
9025 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
9026 if (c)
9028 /* Even if we pass it to all library function calls, it is currently only
9029 defined/used for the OpenMP target ones. */
9030 gcc_checking_assert (start_ix == BUILT_IN_GOMP_TARGET
9031 || start_ix == BUILT_IN_GOMP_TARGET_DATA
9032 || start_ix == BUILT_IN_GOMP_TARGET_UPDATE);
9034 device = OMP_CLAUSE_DEVICE_ID (c);
9035 clause_loc = OMP_CLAUSE_LOCATION (c);
9037 else
9038 clause_loc = gimple_location (entry_stmt);
9040 /* Ensure 'device' is of the correct type. */
9041 device = fold_convert_loc (clause_loc, integer_type_node, device);
9043 /* If we found the clause 'if (cond)', build
9044 (cond ? device : GOMP_DEVICE_HOST_FALLBACK). */
9045 if (cond)
9047 cond = gimple_boolify (cond);
9049 basic_block cond_bb, then_bb, else_bb;
9050 edge e;
9051 tree tmp_var;
9053 tmp_var = create_tmp_var (TREE_TYPE (device));
9054 if (offloaded)
9055 e = split_block_after_labels (new_bb);
9056 else
9058 gsi = gsi_last_bb (new_bb);
9059 gsi_prev (&gsi);
9060 e = split_block (new_bb, gsi_stmt (gsi));
9062 cond_bb = e->src;
9063 new_bb = e->dest;
9064 remove_edge (e);
9066 then_bb = create_empty_bb (cond_bb);
9067 else_bb = create_empty_bb (then_bb);
9068 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
9069 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
9071 stmt = gimple_build_cond_empty (cond);
9072 gsi = gsi_last_bb (cond_bb);
9073 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
9075 gsi = gsi_start_bb (then_bb);
9076 stmt = gimple_build_assign (tmp_var, device);
9077 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
9079 gsi = gsi_start_bb (else_bb);
9080 stmt = gimple_build_assign (tmp_var,
9081 build_int_cst (integer_type_node,
9082 GOMP_DEVICE_HOST_FALLBACK));
9083 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
9085 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
9086 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
9087 add_bb_to_loop (then_bb, cond_bb->loop_father);
9088 add_bb_to_loop (else_bb, cond_bb->loop_father);
9089 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
9090 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
9092 device = tmp_var;
9095 gsi = gsi_last_bb (new_bb);
9096 t = gimple_omp_target_data_arg (entry_stmt);
9097 if (t == NULL)
9099 t1 = size_zero_node;
9100 t2 = build_zero_cst (ptr_type_node);
9101 t3 = t2;
9102 t4 = t2;
9104 else
9106 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
9107 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
9108 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
9109 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
9110 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
9113 gimple g;
9114 /* The maximum number of arguments used by any start_ix, without varargs. */
9115 auto_vec<tree, 11> args;
9116 args.quick_push (device);
9117 if (offloaded)
9118 args.quick_push (build_fold_addr_expr (child_fn));
9119 switch (start_ix)
9121 case BUILT_IN_GOMP_TARGET:
9122 case BUILT_IN_GOMP_TARGET_DATA:
9123 case BUILT_IN_GOMP_TARGET_UPDATE:
9124 /* This const void * is part of the current ABI, but we're not actually
9125 using it. */
9126 args.quick_push (build_zero_cst (ptr_type_node));
9127 break;
9128 case BUILT_IN_GOACC_DATA_START:
9129 case BUILT_IN_GOACC_ENTER_EXIT_DATA:
9130 case BUILT_IN_GOACC_PARALLEL:
9131 case BUILT_IN_GOACC_UPDATE:
9132 break;
9133 default:
9134 gcc_unreachable ();
9136 args.quick_push (t1);
9137 args.quick_push (t2);
9138 args.quick_push (t3);
9139 args.quick_push (t4);
9140 switch (start_ix)
9142 case BUILT_IN_GOACC_DATA_START:
9143 case BUILT_IN_GOMP_TARGET:
9144 case BUILT_IN_GOMP_TARGET_DATA:
9145 case BUILT_IN_GOMP_TARGET_UPDATE:
9146 break;
9147 case BUILT_IN_GOACC_PARALLEL:
9149 tree t_num_gangs, t_num_workers, t_vector_length;
9151 /* Default values for num_gangs, num_workers, and vector_length. */
9152 t_num_gangs = t_num_workers = t_vector_length
9153 = fold_convert_loc (gimple_location (entry_stmt),
9154 integer_type_node, integer_one_node);
9155 /* ..., but if present, use the value specified by the respective
9156 clause, making sure that they are of the correct type. */
9157 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_GANGS);
9158 if (c)
9159 t_num_gangs = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9160 integer_type_node,
9161 OMP_CLAUSE_NUM_GANGS_EXPR (c));
9162 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_WORKERS);
9163 if (c)
9164 t_num_workers = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9165 integer_type_node,
9166 OMP_CLAUSE_NUM_WORKERS_EXPR (c));
9167 c = find_omp_clause (clauses, OMP_CLAUSE_VECTOR_LENGTH);
9168 if (c)
9169 t_vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9170 integer_type_node,
9171 OMP_CLAUSE_VECTOR_LENGTH_EXPR (c));
9172 args.quick_push (t_num_gangs);
9173 args.quick_push (t_num_workers);
9174 args.quick_push (t_vector_length);
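/* E.g. (assumed source): "#pragma acc parallel num_gangs (32)" pushes
   32 here while num_workers and vector_length keep their default of 1.  */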
9176 /* FALLTHRU */
9177 case BUILT_IN_GOACC_ENTER_EXIT_DATA:
9178 case BUILT_IN_GOACC_UPDATE:
9180 tree t_async;
9181 int t_wait_idx;
9183 /* Default values for t_async. */
9184 t_async = fold_convert_loc (gimple_location (entry_stmt),
9185 integer_type_node,
9186 build_int_cst (integer_type_node,
9187 GOMP_ASYNC_SYNC));
9188 /* ..., but if present, use the value specified by the respective
9189 clause, making sure that it is of the correct type. */
9190 c = find_omp_clause (clauses, OMP_CLAUSE_ASYNC);
9191 if (c)
9192 t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9193 integer_type_node,
9194 OMP_CLAUSE_ASYNC_EXPR (c));
9196 args.quick_push (t_async);
9197 /* Save the index, and... */
9198 t_wait_idx = args.length ();
9199 /* ... push a default value. */
9200 args.quick_push (fold_convert_loc (gimple_location (entry_stmt),
9201 integer_type_node,
9202 integer_zero_node));
9203 c = find_omp_clause (clauses, OMP_CLAUSE_WAIT);
9204 if (c)
9206 int n = 0;
9208 for (; c; c = OMP_CLAUSE_CHAIN (c))
9210 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT)
9212 args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9213 integer_type_node,
9214 OMP_CLAUSE_WAIT_EXPR (c)));
9215 n++;
9219 /* Now that we know the number, replace the default value. */
9220 args.ordered_remove (t_wait_idx);
9221 args.quick_insert (t_wait_idx,
9222 fold_convert_loc (gimple_location (entry_stmt),
9223 integer_type_node,
9224 build_int_cst (integer_type_node, n)));
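/* E.g. (assumed source): a "wait (1, 2)" clause pushes the two wait
   arguments above and then rewrites the placeholder count that precedes
   them to 2.  */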
9227 break;
9228 default:
9229 gcc_unreachable ();
9232 g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
9233 gimple_set_location (g, gimple_location (entry_stmt));
9234 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
9235 if (!offloaded)
9237 g = gsi_stmt (gsi);
9238 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
9239 gsi_remove (&gsi, true);
9241 if (data_region
9242 && region->exit)
9244 gsi = gsi_last_bb (region->exit);
9245 g = gsi_stmt (gsi);
9246 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
9247 gsi_remove (&gsi, true);
9252 /* Expand the parallel region tree rooted at REGION. Expansion
9253 proceeds in depth-first order. Innermost regions are expanded
9254 first. This way, parallel regions that require a new function to
9255 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
9256 internal dependencies in their body. */
9258 static void
9259 expand_omp (struct omp_region *region)
9261 while (region)
9263 location_t saved_location;
9264 gimple inner_stmt = NULL;
9266 /* First, determine whether this is a combined parallel+workshare
9267 region. */
9268 if (region->type == GIMPLE_OMP_PARALLEL)
9269 determine_parallel_type (region);
9271 if (region->type == GIMPLE_OMP_FOR
9272 && gimple_omp_for_combined_p (last_stmt (region->entry)))
9273 inner_stmt = last_stmt (region->inner->entry);
9275 if (region->inner)
9276 expand_omp (region->inner);
9278 saved_location = input_location;
9279 if (gimple_has_location (last_stmt (region->entry)))
9280 input_location = gimple_location (last_stmt (region->entry));
9282 switch (region->type)
9284 case GIMPLE_OMP_PARALLEL:
9285 case GIMPLE_OMP_TASK:
9286 expand_omp_taskreg (region);
9287 break;
9289 case GIMPLE_OMP_FOR:
9290 expand_omp_for (region, inner_stmt);
9291 break;
9293 case GIMPLE_OMP_SECTIONS:
9294 expand_omp_sections (region);
9295 break;
9297 case GIMPLE_OMP_SECTION:
9298 /* Individual omp sections are handled together with their
9299 parent GIMPLE_OMP_SECTIONS region. */
9300 break;
9302 case GIMPLE_OMP_SINGLE:
9303 expand_omp_single (region);
9304 break;
9306 case GIMPLE_OMP_MASTER:
9307 case GIMPLE_OMP_TASKGROUP:
9308 case GIMPLE_OMP_ORDERED:
9309 case GIMPLE_OMP_CRITICAL:
9310 case GIMPLE_OMP_TEAMS:
9311 expand_omp_synch (region);
9312 break;
9314 case GIMPLE_OMP_ATOMIC_LOAD:
9315 expand_omp_atomic (region);
9316 break;
9318 case GIMPLE_OMP_TARGET:
9319 expand_omp_target (region);
9320 break;
9322 default:
9323 gcc_unreachable ();
9326 input_location = saved_location;
9327 region = region->next;
9332 /* Helper for build_omp_regions. Scan the dominator tree starting at
9333 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
9334 true, the function ends once a single tree is built (otherwise, the
9335 whole forest of OMP constructs may be built). */
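/* As a sketch, a nest such as

     #pragma omp parallel
     {
       #pragma omp for
       for (...) ...
     }

   yields a GIMPLE_OMP_PARALLEL region whose inner region is the
   GIMPLE_OMP_FOR region; each region's exit field is set to the block
   holding the matching GIMPLE_OMP_RETURN.  */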
9337 static void
9338 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
9339 bool single_tree)
9341 gimple_stmt_iterator gsi;
9342 gimple stmt;
9343 basic_block son;
9345 gsi = gsi_last_bb (bb);
9346 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
9348 struct omp_region *region;
9349 enum gimple_code code;
9351 stmt = gsi_stmt (gsi);
9352 code = gimple_code (stmt);
9353 if (code == GIMPLE_OMP_RETURN)
9355 /* STMT is the return point out of region PARENT. Mark it
9356 as the exit point and make PARENT the immediately
9357 enclosing region. */
9358 gcc_assert (parent);
9359 region = parent;
9360 region->exit = bb;
9361 parent = parent->outer;
9363 else if (code == GIMPLE_OMP_ATOMIC_STORE)
9365 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
9366 GIMPLE_OMP_RETURN, but matches with
9367 GIMPLE_OMP_ATOMIC_LOAD. */
9368 gcc_assert (parent);
9369 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
9370 region = parent;
9371 region->exit = bb;
9372 parent = parent->outer;
9374 else if (code == GIMPLE_OMP_CONTINUE)
9376 gcc_assert (parent);
9377 parent->cont = bb;
9379 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
9381 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
9382 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
9384 else
9386 region = new_omp_region (bb, code, parent);
9387 /* Otherwise... */
9388 if (code == GIMPLE_OMP_TARGET)
9390 switch (gimple_omp_target_kind (stmt))
9392 case GF_OMP_TARGET_KIND_REGION:
9393 case GF_OMP_TARGET_KIND_DATA:
9394 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
9395 case GF_OMP_TARGET_KIND_OACC_KERNELS:
9396 case GF_OMP_TARGET_KIND_OACC_DATA:
9397 break;
9398 case GF_OMP_TARGET_KIND_UPDATE:
9399 case GF_OMP_TARGET_KIND_OACC_UPDATE:
9400 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
9401 /* ..., other than for those stand-alone directives... */
9402 region = NULL;
9403 break;
9404 default:
9405 gcc_unreachable ();
9408 /* ..., this directive becomes the parent for a new region. */
9409 if (region)
9410 parent = region;
9414 if (single_tree && !parent)
9415 return;
9417 for (son = first_dom_son (CDI_DOMINATORS, bb);
9418 son;
9419 son = next_dom_son (CDI_DOMINATORS, son))
9420 build_omp_regions_1 (son, parent, single_tree);
9423 /* Builds the tree of OMP regions rooted at ROOT, storing it to
9424 root_omp_region. */
9426 static void
9427 build_omp_regions_root (basic_block root)
9429 gcc_assert (root_omp_region == NULL);
9430 build_omp_regions_1 (root, NULL, true);
9431 gcc_assert (root_omp_region != NULL);
9434 /* Expands omp construct (and its subconstructs) starting in HEAD. */
9436 void
9437 omp_expand_local (basic_block head)
9439 build_omp_regions_root (head);
9440 if (dump_file && (dump_flags & TDF_DETAILS))
9442 fprintf (dump_file, "\nOMP region tree\n\n");
9443 dump_omp_region (dump_file, root_omp_region, 0);
9444 fprintf (dump_file, "\n");
9447 remove_exit_barriers (root_omp_region);
9448 expand_omp (root_omp_region);
9450 free_omp_regions ();
9453 /* Scan the CFG and build a tree of OMP regions, storing the root of
9454 the tree in root_omp_region. */
9456 static void
9457 build_omp_regions (void)
9459 gcc_assert (root_omp_region == NULL);
9460 calculate_dominance_info (CDI_DOMINATORS);
9461 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
9464 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
9466 static unsigned int
9467 execute_expand_omp (void)
9469 build_omp_regions ();
9471 if (!root_omp_region)
9472 return 0;
9474 if (dump_file)
9476 fprintf (dump_file, "\nOMP region tree\n\n");
9477 dump_omp_region (dump_file, root_omp_region, 0);
9478 fprintf (dump_file, "\n");
9481 remove_exit_barriers (root_omp_region);
9483 expand_omp (root_omp_region);
9485 cleanup_tree_cfg ();
9487 free_omp_regions ();
9489 return 0;
9492 /* OMP expansion -- the default pass, run before creation of SSA form. */
9494 namespace {
9496 const pass_data pass_data_expand_omp =
9498 GIMPLE_PASS, /* type */
9499 "ompexp", /* name */
9500 OPTGROUP_NONE, /* optinfo_flags */
9501 TV_NONE, /* tv_id */
9502 PROP_gimple_any, /* properties_required */
9503 PROP_gimple_eomp, /* properties_provided */
9504 0, /* properties_destroyed */
9505 0, /* todo_flags_start */
9506 0, /* todo_flags_finish */
9509 class pass_expand_omp : public gimple_opt_pass
9511 public:
9512 pass_expand_omp (gcc::context *ctxt)
9513 : gimple_opt_pass (pass_data_expand_omp, ctxt)
9516 /* opt_pass methods: */
9517 virtual unsigned int execute (function *)
9519 bool gate = ((flag_cilkplus != 0 || flag_openacc != 0 || flag_openmp != 0
9520 || flag_openmp_simd != 0)
9521 && !seen_error ());
9523 /* This pass always runs, to provide PROP_gimple_eomp.
9524 But often, there is nothing to do. */
9525 if (!gate)
9526 return 0;
9528 return execute_expand_omp ();
9531 }; // class pass_expand_omp
9533 } // anon namespace
9535 gimple_opt_pass *
9536 make_pass_expand_omp (gcc::context *ctxt)
9538 return new pass_expand_omp (ctxt);
9541 namespace {
9543 const pass_data pass_data_expand_omp_ssa =
9545 GIMPLE_PASS, /* type */
9546 "ompexpssa", /* name */
9547 OPTGROUP_NONE, /* optinfo_flags */
9548 TV_NONE, /* tv_id */
9549 PROP_cfg | PROP_ssa, /* properties_required */
9550 PROP_gimple_eomp, /* properties_provided */
9551 0, /* properties_destroyed */
9552 0, /* todo_flags_start */
9553 TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
9556 class pass_expand_omp_ssa : public gimple_opt_pass
9558 public:
9559 pass_expand_omp_ssa (gcc::context *ctxt)
9560 : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
9563 /* opt_pass methods: */
9564 virtual bool gate (function *fun)
9566 return !(fun->curr_properties & PROP_gimple_eomp);
9568 virtual unsigned int execute (function *) { return execute_expand_omp (); }
9570 }; // class pass_expand_omp_ssa
9572 } // anon namespace
9574 gimple_opt_pass *
9575 make_pass_expand_omp_ssa (gcc::context *ctxt)
9577 return new pass_expand_omp_ssa (ctxt);
9580 /* Routines to lower OMP directives into OMP-GIMPLE. */
9582 /* Helper function to perform a, potentially COMPLEX_TYPE, operation and
9583 convert it to gimple. */
9584 static void
9585 oacc_gimple_assign (tree dest, tree_code op, tree src, gimple_seq *seq)
9587 gimple stmt;
9589 if (TREE_CODE (TREE_TYPE (dest)) != COMPLEX_TYPE)
9591 stmt = gimple_build_assign (dest, op, dest, src);
9592 gimple_seq_add_stmt (seq, stmt);
9593 return;
9596 tree t = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9597 tree rdest = fold_build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (dest)), dest);
9598 gimplify_assign (t, rdest, seq);
9599 rdest = t;
9601 t = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9602 tree idest = fold_build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (dest)), dest);
9603 gimplify_assign (t, idest, seq);
9604 idest = t;
9606 t = create_tmp_var (TREE_TYPE (TREE_TYPE (src)));
9607 tree rsrc = fold_build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (src)), src);
9608 gimplify_assign (t, rsrc, seq);
9609 rsrc = t;
9611 t = create_tmp_var (TREE_TYPE (TREE_TYPE (src)));
9612 tree isrc = fold_build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (src)), src);
9613 gimplify_assign (t, isrc, seq);
9614 isrc = t;
9616 tree r = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9617 tree i = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9618 tree result;
9620 if (op == PLUS_EXPR)
9622 stmt = gimple_build_assign (r, op, rdest, rsrc);
9623 gimple_seq_add_stmt (seq, stmt);
9625 stmt = gimple_build_assign (i, op, idest, isrc);
9626 gimple_seq_add_stmt (seq, stmt);
9628 else if (op == MULT_EXPR)
9630 /* Let x = a + ib = dest, y = c + id = src.
9631 x * y = (ac - bd) + i(ad + bc) */
9632 tree ac = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9633 tree bd = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9634 tree ad = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9635 tree bc = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9637 stmt = gimple_build_assign (ac, MULT_EXPR, rdest, rsrc);
9638 gimple_seq_add_stmt (seq, stmt);
9640 stmt = gimple_build_assign (bd, MULT_EXPR, idest, isrc);
9641 gimple_seq_add_stmt (seq, stmt);
9643 stmt = gimple_build_assign (r, MINUS_EXPR, ac, bd);
9644 gimple_seq_add_stmt (seq, stmt);
9646 stmt = gimple_build_assign (ad, MULT_EXPR, rdest, isrc);
9647 gimple_seq_add_stmt (seq, stmt);
9649 stmt = gimple_build_assign (bc, MULT_EXPR, idest, rsrc);
9650 gimple_seq_add_stmt (seq, stmt);
9652 stmt = gimple_build_assign (i, PLUS_EXPR, ad, bc);
9653 gimple_seq_add_stmt (seq, stmt);
9655 else
9656 gcc_unreachable ();
9658 result = build2 (COMPLEX_EXPR, TREE_TYPE (dest), r, i);
9659 gimplify_assign (dest, result, seq);
9662 /* Helper function to initialize local data for the reduction arrays.
9663 The reduction arrays need to be placed inside the calling function
9664 for accelerators, or else the host won't be able to perform the final
9665 reduction. */
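/* As a sketch, for

     #pragma acc parallel reduction (+:sum)

   this allocates (via alloca) an array with one partial-result slot per
   thread, and chains a GOMP_MAP_FORCE_FROM map clause for that array
   onto the enclosing target statement so the partial results are copied
   back to the host for the final reduction.  */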
9667 static void
9668 oacc_initialize_reduction_data (tree clauses, tree nthreads,
9669 gimple_seq *stmt_seqp, omp_context *ctx)
9671 tree c, t, oc;
9672 gimple stmt;
9673 omp_context *octx;
9675 /* Find the innermost OpenACC parallel context. */
9676 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
9677 && (gimple_omp_target_kind (ctx->stmt)
9678 == GF_OMP_TARGET_KIND_OACC_PARALLEL))
9679 octx = ctx;
9680 else
9681 octx = ctx->outer;
9682 gcc_checking_assert (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
9683 && (gimple_omp_target_kind (octx->stmt)
9684 == GF_OMP_TARGET_KIND_OACC_PARALLEL));
9686 /* Extract the clauses. */
9687 oc = gimple_omp_target_clauses (octx->stmt);
9689 /* Find the last outer clause. */
9690 for (; oc && OMP_CLAUSE_CHAIN (oc); oc = OMP_CLAUSE_CHAIN (oc))
9693 /* Allocate arrays for each reduction variable. */
9694 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9696 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
9697 continue;
9699 tree var = OMP_CLAUSE_DECL (c);
9700 tree type = get_base_type (var);
9701 tree array = lookup_oacc_reduction (oacc_get_reduction_array_id (var),
9702 ctx);
9703 tree size, call;
9705 /* Calculate size of the reduction array. */
9706 t = create_tmp_var (TREE_TYPE (nthreads));
9707 stmt = gimple_build_assign (t, MULT_EXPR, nthreads,
9708 fold_convert (TREE_TYPE (nthreads),
9709 TYPE_SIZE_UNIT (type)));
9710 gimple_seq_add_stmt (stmt_seqp, stmt);
9712 size = create_tmp_var (sizetype);
9713 gimplify_assign (size, fold_build1 (NOP_EXPR, sizetype, t), stmt_seqp);
9715 /* Now allocate memory for it. */
9716 call = unshare_expr (builtin_decl_explicit (BUILT_IN_ALLOCA));
9717 stmt = gimple_build_call (call, 1, size);
9718 gimple_call_set_lhs (stmt, array);
9719 gimple_seq_add_stmt (stmt_seqp, stmt);
9721 /* Map this array into the accelerator. */
9723 /* Add the reduction array to the list of clauses. */
9724 tree x = array;
9725 t = build_omp_clause (gimple_location (ctx->stmt), OMP_CLAUSE_MAP);
9726 OMP_CLAUSE_SET_MAP_KIND (t, GOMP_MAP_FORCE_FROM);
9727 OMP_CLAUSE_DECL (t) = x;
9728 OMP_CLAUSE_CHAIN (t) = NULL;
9729 if (oc)
9730 OMP_CLAUSE_CHAIN (oc) = t;
9731 else
9732 gimple_omp_target_set_clauses (as_a <gomp_target *> (octx->stmt), t);
9733 OMP_CLAUSE_SIZE (t) = size;
9734 oc = t;
9738 /* Helper function to process the array of partial reductions. NTHREADS
9739 indicates the number of threads. Unfortunately, GOACC_GET_NUM_THREADS
9740 cannot be used here, because nthreads on the host may be different from
9741 that on the accelerator. */
9743 static void
9744 oacc_finalize_reduction_data (tree clauses, tree nthreads,
9745 gimple_seq *stmt_seqp, omp_context *ctx)
9747 tree c, x, var, array, loop_header, loop_body, loop_exit, type;
9748 gimple stmt;
9750 /* Create for loop.
9752 let var = the original reduction variable
9753 let array = reduction variable array
9755 for (i = 0; i < nthreads; i++)
9756 var op= array[i]
9759 loop_header = create_artificial_label (UNKNOWN_LOCATION);
9760 loop_body = create_artificial_label (UNKNOWN_LOCATION);
9761 loop_exit = create_artificial_label (UNKNOWN_LOCATION);
9763 /* Create and initialize an index variable. */
9764 tree ix = create_tmp_var (sizetype);
9765 gimplify_assign (ix, fold_build1 (NOP_EXPR, sizetype, integer_zero_node),
9766 stmt_seqp);
9768 /* Insert the loop header label here. */
9769 gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_header));
9771 /* Exit loop if ix >= nthreads. */
9772 x = create_tmp_var (sizetype);
9773 gimplify_assign (x, fold_build1 (NOP_EXPR, sizetype, nthreads), stmt_seqp);
9774 stmt = gimple_build_cond (GE_EXPR, ix, x, loop_exit, loop_body);
9775 gimple_seq_add_stmt (stmt_seqp, stmt);
9777 /* Insert the loop body label here. */
9778 gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_body));
9780 /* Collapse each reduction array, one element at a time. */
9781 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9783 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
9784 continue;
9786 tree_code reduction_code = OMP_CLAUSE_REDUCTION_CODE (c);
9788 /* reduction(-:var) sums up the partial results, so it acts
9789 identically to reduction(+:var). */
9790 if (reduction_code == MINUS_EXPR)
9791 reduction_code = PLUS_EXPR;
9793 /* Set up reduction variable var. */
9794 var = OMP_CLAUSE_DECL (c);
9795 type = get_base_type (var);
9796 array = lookup_oacc_reduction (oacc_get_reduction_array_id
9797 (OMP_CLAUSE_DECL (c)), ctx);
9799 /* Calculate the array offset. */
9800 tree offset = create_tmp_var (sizetype);
9801 gimplify_assign (offset, TYPE_SIZE_UNIT (type), stmt_seqp);
9802 stmt = gimple_build_assign (offset, MULT_EXPR, offset, ix);
9803 gimple_seq_add_stmt (stmt_seqp, stmt);
9805 tree ptr = create_tmp_var (TREE_TYPE (array));
9806 stmt = gimple_build_assign (ptr, POINTER_PLUS_EXPR, array, offset);
9807 gimple_seq_add_stmt (stmt_seqp, stmt);
9809 /* Extract array[ix] into mem. */
9810 tree mem = create_tmp_var (type);
9811 gimplify_assign (mem, build_simple_mem_ref (ptr), stmt_seqp);
9813 /* Find the original reduction variable. */
9814 if (is_reference (var))
9815 var = build_simple_mem_ref (var);
9817 tree t = create_tmp_var (type);
9819 x = lang_hooks.decls.omp_clause_assign_op (c, t, var);
9820 gimplify_and_add (unshare_expr (x), stmt_seqp);
9822 /* var = var op mem */
9823 switch (OMP_CLAUSE_REDUCTION_CODE (c))
9825 case TRUTH_ANDIF_EXPR:
9826 case TRUTH_ORIF_EXPR:
9827 t = fold_build2 (OMP_CLAUSE_REDUCTION_CODE (c), integer_type_node,
9828 t, mem);
9829 gimplify_and_add (t, stmt_seqp);
9830 break;
9831 default:
9832 /* The lhs isn't a gimple_reg when var is COMPLEX_TYPE. */
9833 oacc_gimple_assign (t, OMP_CLAUSE_REDUCTION_CODE (c), mem,
9834 stmt_seqp);
9837 t = fold_build1 (NOP_EXPR, TREE_TYPE (var), t);
9838 x = lang_hooks.decls.omp_clause_assign_op (c, var, t);
9839 gimplify_and_add (unshare_expr (x), stmt_seqp);
9842 /* Increment the induction variable. */
9843 tree one = fold_build1 (NOP_EXPR, sizetype, integer_one_node);
9844 stmt = gimple_build_assign (ix, PLUS_EXPR, ix, one);
9845 gimple_seq_add_stmt (stmt_seqp, stmt);
9847 /* Go back to the top of the loop. */
9848 gimple_seq_add_stmt (stmt_seqp, gimple_build_goto (loop_header));
9850 /* Place the loop exit label here. */
9851 gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_exit));
9854 /* Scan through all of the gimple stmts searching for a GIMPLE_OMP_FOR
9855 statement, and scan that for reductions. */
9857 static void
9858 oacc_process_reduction_data (gimple_seq *body, gimple_seq *in_stmt_seqp,
9859 gimple_seq *out_stmt_seqp, omp_context *ctx)
9861 gimple_stmt_iterator gsi;
9862 gimple_seq inner = NULL;
9864 /* A collapse clause may have inserted a new bind block. */
9865 gsi = gsi_start (*body);
9866 while (!gsi_end_p (gsi))
9868 gimple stmt = gsi_stmt (gsi);
9869 if (gbind *bind_stmt = dyn_cast <gbind *> (stmt))
9871 inner = gimple_bind_body (bind_stmt);
9872 body = &inner;
9873 gsi = gsi_start (*body);
9875 else if (dyn_cast <gomp_for *> (stmt))
9876 break;
9877 else
9878 gsi_next (&gsi);
9881 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
9883 tree clauses, nthreads, t, c, acc_device, acc_device_host, call,
9884 enter, exit;
9885 bool reduction_found = false;
9887 gimple stmt = gsi_stmt (gsi);
9889 switch (gimple_code (stmt))
9891 case GIMPLE_OMP_FOR:
9892 clauses = gimple_omp_for_clauses (stmt);
9894 /* Search for a reduction clause. */
9895 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9896 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
9898 reduction_found = true;
9899 break;
9902 if (!reduction_found)
9903 break;
9905 ctx = maybe_lookup_ctx (stmt);
9906 t = NULL_TREE;
9908 /* Extract the number of threads. */
9909 nthreads = create_tmp_var (sizetype);
9910 t = oacc_max_threads (ctx);
9911 gimplify_assign (nthreads, t, in_stmt_seqp);
9913 /* Determine if this kernel will be executed on the host. */
9914 call = builtin_decl_explicit (BUILT_IN_ACC_GET_DEVICE_TYPE);
9915 acc_device = create_tmp_var (integer_type_node, ".acc_device_type");
9916 stmt = gimple_build_call (call, 0);
9917 gimple_call_set_lhs (stmt, acc_device);
9918 gimple_seq_add_stmt (in_stmt_seqp, stmt);
9920 /* Set nthreads = 1 for ACC_DEVICE_TYPE=host. */
9921 acc_device_host = create_tmp_var (integer_type_node,
9922 ".acc_device_host");
9923 gimplify_assign (acc_device_host,
9924 build_int_cst (integer_type_node,
9925 GOMP_DEVICE_HOST),
9926 in_stmt_seqp);
9928 enter = create_artificial_label (UNKNOWN_LOCATION);
9929 exit = create_artificial_label (UNKNOWN_LOCATION);
9931 stmt = gimple_build_cond (EQ_EXPR, acc_device, acc_device_host,
9932 enter, exit);
9933 gimple_seq_add_stmt (in_stmt_seqp, stmt);
9934 gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (enter));
9935 gimplify_assign (nthreads, fold_build1 (NOP_EXPR, sizetype,
9936 integer_one_node),
9937 in_stmt_seqp);
9938 gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (exit));
9940 /* Also, set nthreads = 1 for ACC_DEVICE_TYPE=host_nonshm. */
9941 gimplify_assign (acc_device_host,
9942 build_int_cst (integer_type_node,
9943 GOMP_DEVICE_HOST_NONSHM),
9944 in_stmt_seqp);
9946 enter = create_artificial_label (UNKNOWN_LOCATION);
9947 exit = create_artificial_label (UNKNOWN_LOCATION);
9949 stmt = gimple_build_cond (EQ_EXPR, acc_device, acc_device_host,
9950 enter, exit);
9951 gimple_seq_add_stmt (in_stmt_seqp, stmt);
9952 gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (enter));
9953 gimplify_assign (nthreads, fold_build1 (NOP_EXPR, sizetype,
9954 integer_one_node),
9955 in_stmt_seqp);
9956 gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (exit));
9958 oacc_initialize_reduction_data (clauses, nthreads, in_stmt_seqp,
9959 ctx);
9960 oacc_finalize_reduction_data (clauses, nthreads, out_stmt_seqp, ctx);
9961 break;
9962 default:
9963 // Scan for other directives which support reduction here.
9964 break;
9969 /* If ctx is a worksharing context inside of a cancellable parallel
9970 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
9971 and conditional branch to parallel's cancel_label to handle
9972 cancellation in the implicit barrier. */
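/* A sketch of what gets appended (lhs is a temporary with the boolean
   return type taken from GOMP_cancel's prototype):

     GIMPLE_OMP_RETURN <lhs>
     if (lhs != false) goto cancel_label; else goto fallthru_label;
     fallthru_label:  */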
9974 static void
9975 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
9977 gimple omp_return = gimple_seq_last_stmt (*body);
9978 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
9979 if (gimple_omp_return_nowait_p (omp_return))
9980 return;
9981 if (ctx->outer
9982 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
9983 && ctx->outer->cancellable)
9985 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
9986 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
9987 tree lhs = create_tmp_var (c_bool_type);
9988 gimple_omp_return_set_lhs (omp_return, lhs);
9989 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
9990 gimple g = gimple_build_cond (NE_EXPR, lhs,
9991 fold_convert (c_bool_type,
9992 boolean_false_node),
9993 ctx->outer->cancel_label, fallthru_label);
9994 gimple_seq_add_stmt (body, g);
9995 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
9999 /* Lower the OpenMP sections directive in the current statement in GSI_P.
10000 CTX is the enclosing OMP context for the current statement. */
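/* As a rough sketch, the lowered form built below is

     <input clause initializations>
     GIMPLE_OMP_SECTIONS <control var .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     <bind holding each lowered GIMPLE_OMP_SECTION body,
      lastprivate handling on the last section>
     GIMPLE_OMP_CONTINUE <control, control>
     <reduction and destructor lists>
     GIMPLE_OMP_RETURN <nowait?>  */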
10002 static void
10003 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10005 tree block, control;
10006 gimple_stmt_iterator tgsi;
10007 gomp_sections *stmt;
10008 gimple t;
10009 gbind *new_stmt, *bind;
10010 gimple_seq ilist, dlist, olist, new_body;
10012 stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));
10014 push_gimplify_context ();
10016 dlist = NULL;
10017 ilist = NULL;
10018 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
10019 &ilist, &dlist, ctx, NULL);
10021 new_body = gimple_omp_body (stmt);
10022 gimple_omp_set_body (stmt, NULL);
10023 tgsi = gsi_start (new_body);
10024 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
10026 omp_context *sctx;
10027 gimple sec_start;
10029 sec_start = gsi_stmt (tgsi);
10030 sctx = maybe_lookup_ctx (sec_start);
10031 gcc_assert (sctx);
10033 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
10034 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
10035 GSI_CONTINUE_LINKING);
10036 gimple_omp_set_body (sec_start, NULL);
10038 if (gsi_one_before_end_p (tgsi))
10040 gimple_seq l = NULL;
10041 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
10042 &l, ctx);
10043 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
10044 gimple_omp_section_set_last (sec_start);
10047 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
10048 GSI_CONTINUE_LINKING);
10051 block = make_node (BLOCK);
10052 bind = gimple_build_bind (NULL, new_body, block);
10054 olist = NULL;
10055 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
10057 block = make_node (BLOCK);
10058 new_stmt = gimple_build_bind (NULL, NULL, block);
10059 gsi_replace (gsi_p, new_stmt, true);
10061 pop_gimplify_context (new_stmt);
10062 gimple_bind_append_vars (new_stmt, ctx->block_vars);
10063 BLOCK_VARS (block) = gimple_bind_vars (bind);
10064 if (BLOCK_VARS (block))
10065 TREE_USED (block) = 1;
10067 new_body = NULL;
10068 gimple_seq_add_seq (&new_body, ilist);
10069 gimple_seq_add_stmt (&new_body, stmt);
10070 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
10071 gimple_seq_add_stmt (&new_body, bind);
10073 control = create_tmp_var (unsigned_type_node, ".section");
10074 t = gimple_build_omp_continue (control, control);
10075 gimple_omp_sections_set_control (stmt, control);
10076 gimple_seq_add_stmt (&new_body, t);
10078 gimple_seq_add_seq (&new_body, olist);
10079 if (ctx->cancellable)
10080 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
10081 gimple_seq_add_seq (&new_body, dlist);
10083 new_body = maybe_catch_exception (new_body);
10085 t = gimple_build_omp_return
10086 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
10087 OMP_CLAUSE_NOWAIT));
10088 gimple_seq_add_stmt (&new_body, t);
10089 maybe_add_implicit_barrier_cancel (ctx, &new_body);
10091 gimple_bind_set_body (new_stmt, new_body);
10095 /* A subroutine of lower_omp_single. Expand the simple form of
10096 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
10098 if (GOMP_single_start ())
10099 BODY;
10100 [ GOMP_barrier (); ] -> unless 'nowait' is present.
10102 FIXME. It may be better to delay expanding the logic of this until
10103 pass_expand_omp. The expanded logic may make the job more difficult
10104 for a synchronization analysis pass. */
10106 static void
10107 lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
10109 location_t loc = gimple_location (single_stmt);
10110 tree tlabel = create_artificial_label (loc);
10111 tree flabel = create_artificial_label (loc);
10112 gimple call, cond;
10113 tree lhs, decl;
10115 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
10116 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)));
10117 call = gimple_build_call (decl, 0);
10118 gimple_call_set_lhs (call, lhs);
10119 gimple_seq_add_stmt (pre_p, call);
10121 cond = gimple_build_cond (EQ_EXPR, lhs,
10122 fold_convert_loc (loc, TREE_TYPE (lhs),
10123 boolean_true_node),
10124 tlabel, flabel);
10125 gimple_seq_add_stmt (pre_p, cond);
10126 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
10127 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
10128 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
10132 /* A subroutine of lower_omp_single. Expand the simple form of
10133 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
10135 #pragma omp single copyprivate (a, b, c)
10137 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
10140 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
10142 BODY;
10143 copyout.a = a;
10144 copyout.b = b;
10145 copyout.c = c;
10146 GOMP_single_copy_end (&copyout);
10148 else
10150 a = copyout_p->a;
10151 b = copyout_p->b;
10152 c = copyout_p->c;
10154 GOMP_barrier ();
10157 FIXME. It may be better to delay expanding the logic of this until
10158 pass_expand_omp. The expanded logic may make the job more difficult
10159 for a synchronization analysis pass. */
10161 static void
10162 lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
10163 omp_context *ctx)
10165 tree ptr_type, t, l0, l1, l2, bfn_decl;
10166 gimple_seq copyin_seq;
10167 location_t loc = gimple_location (single_stmt);
10169 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
10171 ptr_type = build_pointer_type (ctx->record_type);
10172 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
10174 l0 = create_artificial_label (loc);
10175 l1 = create_artificial_label (loc);
10176 l2 = create_artificial_label (loc);
10178 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
10179 t = build_call_expr_loc (loc, bfn_decl, 0);
10180 t = fold_convert_loc (loc, ptr_type, t);
10181 gimplify_assign (ctx->receiver_decl, t, pre_p);
10183 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
10184 build_int_cst (ptr_type, 0));
10185 t = build3 (COND_EXPR, void_type_node, t,
10186 build_and_jump (&l0), build_and_jump (&l1));
10187 gimplify_and_add (t, pre_p);
10189 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
10191 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
10193 copyin_seq = NULL;
10194 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
10195 &copyin_seq, ctx);
10197 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10198 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
10199 t = build_call_expr_loc (loc, bfn_decl, 1, t);
10200 gimplify_and_add (t, pre_p);
10202 t = build_and_jump (&l2);
10203 gimplify_and_add (t, pre_p);
10205 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
10207 gimple_seq_add_seq (pre_p, copyin_seq);
10209 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
10213 /* Expand code for an OpenMP single directive. */
10215 static void
10216 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10218 tree block;
10219 gimple t;
10220 gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
10221 gbind *bind;
10222 gimple_seq bind_body, bind_body_tail = NULL, dlist;
10224 push_gimplify_context ();
10226 block = make_node (BLOCK);
10227 bind = gimple_build_bind (NULL, NULL, block);
10228 gsi_replace (gsi_p, bind, true);
10229 bind_body = NULL;
10230 dlist = NULL;
10231 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
10232 &bind_body, &dlist, ctx, NULL);
10233 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
10235 gimple_seq_add_stmt (&bind_body, single_stmt);
10237 if (ctx->record_type)
10238 lower_omp_single_copy (single_stmt, &bind_body, ctx);
10239 else
10240 lower_omp_single_simple (single_stmt, &bind_body);
10242 gimple_omp_set_body (single_stmt, NULL);
10244 gimple_seq_add_seq (&bind_body, dlist);
10246 bind_body = maybe_catch_exception (bind_body);
10248 t = gimple_build_omp_return
10249 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
10250 OMP_CLAUSE_NOWAIT));
10251 gimple_seq_add_stmt (&bind_body_tail, t);
10252 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
10253 if (ctx->record_type)
10255 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
10256 tree clobber = build_constructor (ctx->record_type, NULL);
10257 TREE_THIS_VOLATILE (clobber) = 1;
10258 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
10259 clobber), GSI_SAME_STMT);
10261 gimple_seq_add_seq (&bind_body, bind_body_tail);
10262 gimple_bind_set_body (bind, bind_body);
10264 pop_gimplify_context (bind);
10266 gimple_bind_append_vars (bind, ctx->block_vars);
10267 BLOCK_VARS (block) = ctx->block_vars;
10268 if (BLOCK_VARS (block))
10269 TREE_USED (block) = 1;
10273 /* Expand code for an OpenMP master directive. */
10275 static void
10276 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10278 tree block, lab = NULL, x, bfn_decl;
10279 gimple stmt = gsi_stmt (*gsi_p);
10280 gbind *bind;
10281 location_t loc = gimple_location (stmt);
10282 gimple_seq tseq;
10284 push_gimplify_context ();
10286 block = make_node (BLOCK);
10287 bind = gimple_build_bind (NULL, NULL, block);
10288 gsi_replace (gsi_p, bind, true);
10289 gimple_bind_add_stmt (bind, stmt);
10291 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
10292 x = build_call_expr_loc (loc, bfn_decl, 0);
10293 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
10294 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
10295 tseq = NULL;
10296 gimplify_and_add (x, &tseq);
10297 gimple_bind_add_seq (bind, tseq);
10299 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10300 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
10301 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
10302 gimple_omp_set_body (stmt, NULL);
10304 gimple_bind_add_stmt (bind, gimple_build_label (lab));
10306 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
10308 pop_gimplify_context (bind);
10310 gimple_bind_append_vars (bind, ctx->block_vars);
10311 BLOCK_VARS (block) = ctx->block_vars;
10315 /* Expand code for an OpenMP taskgroup directive. */
10317 static void
10318 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10320 gimple stmt = gsi_stmt (*gsi_p);
10321 gcall *x;
10322 gbind *bind;
10323 tree block = make_node (BLOCK);
10325 bind = gimple_build_bind (NULL, NULL, block);
10326 gsi_replace (gsi_p, bind, true);
10327 gimple_bind_add_stmt (bind, stmt);
10329 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
10331 gimple_bind_add_stmt (bind, x);
10333 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10334 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
10335 gimple_omp_set_body (stmt, NULL);
10337 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
10339 gimple_bind_append_vars (bind, ctx->block_vars);
10340 BLOCK_VARS (block) = ctx->block_vars;
10344 /* Expand code for an OpenMP ordered directive. */
10346 static void
10347 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10349 tree block;
10350 gimple stmt = gsi_stmt (*gsi_p);
10351 gcall *x;
10352 gbind *bind;
10354 push_gimplify_context ();
10356 block = make_node (BLOCK);
10357 bind = gimple_build_bind (NULL, NULL, block);
10358 gsi_replace (gsi_p, bind, true);
10359 gimple_bind_add_stmt (bind, stmt);
10361 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
10363 gimple_bind_add_stmt (bind, x);
10365 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10366 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
10367 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
10368 gimple_omp_set_body (stmt, NULL);
10370 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
10371 gimple_bind_add_stmt (bind, x);
10373 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
10375 pop_gimplify_context (bind);
10377 gimple_bind_append_vars (bind, ctx->block_vars);
10378 BLOCK_VARS (block) = gimple_bind_vars (bind);
10382 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
10383 substitution of a couple of function calls. But in the NAMED case,
10384 it requires that languages coordinate a symbol name. It is therefore
10385 best put here in common code. */
10387 static GTY(()) hash_map<tree, tree> *critical_name_mutexes;
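/* As a sketch, for

     #pragma omp critical (foo)

   this wraps the lowered body as

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     <body>
     GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common symbol created below.  */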
10389 static void
10390 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10392 tree block;
10393 tree name, lock, unlock;
10394 gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
10395 gbind *bind;
10396 location_t loc = gimple_location (stmt);
10397 gimple_seq tbody;
10399 name = gimple_omp_critical_name (stmt);
10400 if (name)
10402 tree decl;
10404 if (!critical_name_mutexes)
10405 critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);
10407 tree *n = critical_name_mutexes->get (name);
10408 if (n == NULL)
10410 char *new_str;
10412 decl = create_tmp_var_raw (ptr_type_node);
10414 new_str = ACONCAT ((".gomp_critical_user_",
10415 IDENTIFIER_POINTER (name), NULL));
10416 DECL_NAME (decl) = get_identifier (new_str);
10417 TREE_PUBLIC (decl) = 1;
10418 TREE_STATIC (decl) = 1;
10419 DECL_COMMON (decl) = 1;
10420 DECL_ARTIFICIAL (decl) = 1;
10421 DECL_IGNORED_P (decl) = 1;
10423 varpool_node::finalize_decl (decl);
10425 critical_name_mutexes->put (name, decl);
10427 else
10428 decl = *n;
10430 /* If '#pragma omp critical' is inside offloaded region or
10431 inside function marked as offloadable, the symbol must be
10432 marked as offloadable too. */
10433 omp_context *octx;
10434 if (cgraph_node::get (current_function_decl)->offloadable)
10435 varpool_node::get_create (decl)->offloadable = 1;
10436 else
10437 for (octx = ctx->outer; octx; octx = octx->outer)
10438 if (is_gimple_omp_offloaded (octx->stmt))
10440 varpool_node::get_create (decl)->offloadable = 1;
10441 break;
10444 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
10445 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
10447 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
10448 unlock = build_call_expr_loc (loc, unlock, 1,
10449 build_fold_addr_expr_loc (loc, decl));
10451 else
10453 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
10454 lock = build_call_expr_loc (loc, lock, 0);
10456 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
10457 unlock = build_call_expr_loc (loc, unlock, 0);
10460 push_gimplify_context ();
10462 block = make_node (BLOCK);
10463 bind = gimple_build_bind (NULL, NULL, block);
10464 gsi_replace (gsi_p, bind, true);
10465 gimple_bind_add_stmt (bind, stmt);
10467 tbody = gimple_bind_body (bind);
10468 gimplify_and_add (lock, &tbody);
10469 gimple_bind_set_body (bind, tbody);
10471 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10472 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
10473 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
10474 gimple_omp_set_body (stmt, NULL);
10476 tbody = gimple_bind_body (bind);
10477 gimplify_and_add (unlock, &tbody);
10478 gimple_bind_set_body (bind, tbody);
10480 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
10482 pop_gimplify_context (bind);
10483 gimple_bind_append_vars (bind, ctx->block_vars);
10484 BLOCK_VARS (block) = gimple_bind_vars (bind);
10488 /* A subroutine of lower_omp_for. Generate code to emit the predicate
10489 for a lastprivate clause. Given a loop control predicate of (V
10490 cond N2), we gate the clause on (!(V cond N2)). The lowered form
10491 is appended to *DLIST, iterator initialization is appended to
10492 *BODY_P. */
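/* As a sketch, for a loop with header (V = N1; V < N2; V++) the
   lastprivate clauses are gated on !(V < N2), i.e. V >= N2; and because
   the step is 1, the code below strengthens this to V == N2 so that
   value range propagation can deduce V's final value.  */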
10494 static void
10495 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
10496 gimple_seq *dlist, struct omp_context *ctx)
10498 tree clauses, cond, vinit;
10499 enum tree_code cond_code;
10500 gimple_seq stmts;
10502 cond_code = fd->loop.cond_code;
10503 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
10505 /* When possible, use a strict equality expression. This can let VRP
10506 type optimizations deduce the value and remove a copy. */
10507 if (tree_fits_shwi_p (fd->loop.step))
10509 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
10510 if (step == 1 || step == -1)
10511 cond_code = EQ_EXPR;
10514 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
10516 clauses = gimple_omp_for_clauses (fd->for_stmt);
10517 stmts = NULL;
10518 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
10519 if (!gimple_seq_empty_p (stmts))
10521 gimple_seq_add_seq (&stmts, *dlist);
10522 *dlist = stmts;
10524 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
10525 vinit = fd->loop.n1;
10526 if (cond_code == EQ_EXPR
10527 && tree_fits_shwi_p (fd->loop.n2)
10528 && ! integer_zerop (fd->loop.n2))
10529 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
10530 else
10531 vinit = unshare_expr (vinit);
10533 /* Initialize the iterator variable, so that threads that don't execute
10534 any iterations don't execute the lastprivate clauses by accident. */
10535 gimplify_assign (fd->loop.v, vinit, body_p);
10540 /* Lower code for an OMP loop directive. */
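/* As a rough sketch, the body built below has the shape

     <input clauses> <pre-body> <lowered header temporaries>
     GIMPLE_OMP_FOR
     <lowered loop body>
     GIMPLE_OMP_CONTINUE <V, V>
     <reduction clauses> <destructor list>
     GIMPLE_OMP_RETURN <nowait?>  */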
10542 static void
10543 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10545 tree *rhs_p, block;
10546 struct omp_for_data fd, *fdp = NULL;
10547 gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
10548 gbind *new_stmt;
10549 gimple_seq omp_for_body, body, dlist;
10550 size_t i;
10552 push_gimplify_context ();
10554 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
10556 block = make_node (BLOCK);
10557 new_stmt = gimple_build_bind (NULL, NULL, block);
10558 /* Replace at gsi right away, so that 'stmt' is no member
10559 of a sequence anymore, as we're going to add it to a different
10560 one below. */
10561 gsi_replace (gsi_p, new_stmt, true);
10563 /* Move declarations of temporaries in the loop body before we make
10564 it go away. */
10565 omp_for_body = gimple_omp_body (stmt);
10566 if (!gimple_seq_empty_p (omp_for_body)
10567 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
10569 gbind *inner_bind
10570 = as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
10571 tree vars = gimple_bind_vars (inner_bind);
10572 gimple_bind_append_vars (new_stmt, vars);
10573 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
10574 keep them on the inner_bind and its block. */
10575 gimple_bind_set_vars (inner_bind, NULL_TREE);
10576 if (gimple_bind_block (inner_bind))
10577 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
10580 if (gimple_omp_for_combined_into_p (stmt))
10582 extract_omp_for_data (stmt, &fd, NULL);
10583 fdp = &fd;
10585 /* We need two temporaries with fd.loop.v type (istart/iend)
10586 and then (fd.collapse - 1) temporaries with the same
10587 type for count2 ... countN-1 vars if not constant. */
10588 size_t count = 2;
10589 tree type = fd.iter_type;
10590 if (fd.collapse > 1
10591 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
10592 count += fd.collapse - 1;
10593 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
10594 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
10595 tree clauses = *pc;
10596 if (parallel_for)
10597 outerc
10598 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
10599 OMP_CLAUSE__LOOPTEMP_);
10600 for (i = 0; i < count; i++)
10602 tree temp;
10603 if (parallel_for)
10605 gcc_assert (outerc);
10606 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
10607 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
10608 OMP_CLAUSE__LOOPTEMP_);
10610 else
10612 temp = create_tmp_var (type);
10613 insert_decl_map (&ctx->outer->cb, temp, temp);
10615 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
10616 OMP_CLAUSE_DECL (*pc) = temp;
10617 pc = &OMP_CLAUSE_CHAIN (*pc);
10619 *pc = clauses;
10622 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
10623 dlist = NULL;
10624 body = NULL;
10625 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
10626 fdp);
10627 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
10629 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10631 /* Lower the header expressions. At this point, we can assume that
10632 the header is of the form:
10634 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
10636 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
10637 using the .omp_data_s mapping, if needed. */
10638 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
10640 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
10641 if (!is_gimple_min_invariant (*rhs_p))
10642 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
10644 rhs_p = gimple_omp_for_final_ptr (stmt, i);
10645 if (!is_gimple_min_invariant (*rhs_p))
10646 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
10648 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
10649 if (!is_gimple_min_invariant (*rhs_p))
10650 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
10653 /* Once lowered, extract the bounds and clauses. */
10654 extract_omp_for_data (stmt, &fd, NULL);
10656 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
10658 gimple_seq_add_stmt (&body, stmt);
10659 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
10661 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
10662 fd.loop.v));
10664 /* After the loop, add exit clauses. */
10665 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
10667 if (ctx->cancellable)
10668 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
10670 gimple_seq_add_seq (&body, dlist);
10672 body = maybe_catch_exception (body);
10674 /* Region exit marker goes at the end of the loop body. */
10675 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
10676 maybe_add_implicit_barrier_cancel (ctx, &body);
10677 pop_gimplify_context (new_stmt);
10679 gimple_bind_append_vars (new_stmt, ctx->block_vars);
10680 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
10681 if (BLOCK_VARS (block))
10682 TREE_USED (block) = 1;
10684 gimple_bind_set_body (new_stmt, body);
10685 gimple_omp_set_body (stmt, NULL);
10686 gimple_omp_for_set_pre_body (stmt, NULL);
10689 /* Callback for walk_stmts. Check if the current statement only contains
10690 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
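/* As a sketch, this drives the combined-parallel detection in
   lower_omp_taskreg: for

     #pragma omp parallel
     #pragma omp for
     for (...) ...

   the walk finds exactly one workshare statement (*info becomes 1), so
   the parallel is marked combined; any other statement in the body
   forces *info to -1.  */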
10692 static tree
10693 check_combined_parallel (gimple_stmt_iterator *gsi_p,
10694 bool *handled_ops_p,
10695 struct walk_stmt_info *wi)
10697 int *info = (int *) wi->info;
10698 gimple stmt = gsi_stmt (*gsi_p);
10700 *handled_ops_p = true;
10701 switch (gimple_code (stmt))
10703 WALK_SUBSTMTS;
10705 case GIMPLE_OMP_FOR:
10706 case GIMPLE_OMP_SECTIONS:
10707 *info = *info == 0 ? 1 : -1;
10708 break;
10709 default:
10710 *info = -1;
10711 break;
10713 return NULL;
10716 struct omp_taskcopy_context
10718 /* This field must be at the beginning, as we do "inheritance": Some
10719 callback functions for tree-inline.c (e.g., omp_copy_decl)
10720 receive a copy_body_data pointer that is up-casted to an
10721 omp_context pointer. */
10722 copy_body_data cb;
10723 omp_context *ctx;
10726 static tree
10727 task_copyfn_copy_decl (tree var, copy_body_data *cb)
10729 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
10731 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
10732 return create_tmp_var (TREE_TYPE (var));
10734 return var;
10737 static tree
10738 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
10740 tree name, new_fields = NULL, type, f;
10742 type = lang_hooks.types.make_type (RECORD_TYPE);
10743 name = DECL_NAME (TYPE_NAME (orig_type));
10744 name = build_decl (gimple_location (tcctx->ctx->stmt),
10745 TYPE_DECL, name, type);
10746 TYPE_NAME (type) = name;
10748 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
10750 tree new_f = copy_node (f);
10751 DECL_CONTEXT (new_f) = type;
10752 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
10753 TREE_CHAIN (new_f) = new_fields;
10754 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
10755 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
10756 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
10757 &tcctx->cb, NULL);
10758 new_fields = new_f;
10759 tcctx->cb.decl_map->put (f, new_f);
10761 TYPE_FIELDS (type) = nreverse (new_fields);
10762 layout_type (type);
10763 return type;
10766 /* Create task copyfn: copy shared variable pointers and copy-construct firstprivate variables for a spawned task. */
10768 static void
10769 create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
10771 struct function *child_cfun;
10772 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
10773 tree record_type, srecord_type, bind, list;
10774 bool record_needs_remap = false, srecord_needs_remap = false;
10775 splay_tree_node n;
10776 struct omp_taskcopy_context tcctx;
10777 location_t loc = gimple_location (task_stmt);
10779 child_fn = gimple_omp_task_copy_fn (task_stmt);
10780 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
10781 gcc_assert (child_cfun->cfg == NULL);
10782 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
10784 /* Reset DECL_CONTEXT on function arguments. */
10785 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
10786 DECL_CONTEXT (t) = child_fn;
10788 /* Populate the function. */
10789 push_gimplify_context ();
10790 push_cfun (child_cfun);
10792 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
10793 TREE_SIDE_EFFECTS (bind) = 1;
10794 list = NULL;
10795 DECL_SAVED_TREE (child_fn) = bind;
10796 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
10798 /* Remap src and dst argument types if needed. */
10799 record_type = ctx->record_type;
10800 srecord_type = ctx->srecord_type;
10801 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
10802 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
10804 record_needs_remap = true;
10805 break;
10807 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
10808 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
10810 srecord_needs_remap = true;
10811 break;
10814 if (record_needs_remap || srecord_needs_remap)
10816 memset (&tcctx, '\0', sizeof (tcctx));
10817 tcctx.cb.src_fn = ctx->cb.src_fn;
10818 tcctx.cb.dst_fn = child_fn;
10819 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
10820 gcc_checking_assert (tcctx.cb.src_node);
10821 tcctx.cb.dst_node = tcctx.cb.src_node;
10822 tcctx.cb.src_cfun = ctx->cb.src_cfun;
10823 tcctx.cb.copy_decl = task_copyfn_copy_decl;
10824 tcctx.cb.eh_lp_nr = 0;
10825 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
10826 tcctx.cb.decl_map = new hash_map<tree, tree>;
10827 tcctx.ctx = ctx;
10829 if (record_needs_remap)
10830 record_type = task_copyfn_remap_type (&tcctx, record_type);
10831 if (srecord_needs_remap)
10832 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
10834 else
10835 tcctx.cb.decl_map = NULL;
10837 arg = DECL_ARGUMENTS (child_fn);
10838 TREE_TYPE (arg) = build_pointer_type (record_type);
10839 sarg = DECL_CHAIN (arg);
10840 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
10842 /* First pass: initialize temporaries used in record_type and srecord_type
10843 sizes and field offsets. */
10844 if (tcctx.cb.decl_map)
10845 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
10846 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
10848 tree *p;
10850 decl = OMP_CLAUSE_DECL (c);
10851 p = tcctx.cb.decl_map->get (decl);
10852 if (p == NULL)
10853 continue;
10854 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
10855 sf = (tree) n->value;
10856 sf = *tcctx.cb.decl_map->get (sf);
10857 src = build_simple_mem_ref_loc (loc, sarg);
10858 src = omp_build_component_ref (src, sf);
10859 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
10860 append_to_statement_list (t, &list);
10863 /* Second pass: copy shared var pointers and copy construct non-VLA
10864 firstprivate vars. */
10865 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
10866 switch (OMP_CLAUSE_CODE (c))
10868 case OMP_CLAUSE_SHARED:
10869 decl = OMP_CLAUSE_DECL (c);
10870 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
10871 if (n == NULL)
10872 break;
10873 f = (tree) n->value;
10874 if (tcctx.cb.decl_map)
10875 f = *tcctx.cb.decl_map->get (f);
10876 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
10877 sf = (tree) n->value;
10878 if (tcctx.cb.decl_map)
10879 sf = *tcctx.cb.decl_map->get (sf);
10880 src = build_simple_mem_ref_loc (loc, sarg);
10881 src = omp_build_component_ref (src, sf);
10882 dst = build_simple_mem_ref_loc (loc, arg);
10883 dst = omp_build_component_ref (dst, f);
10884 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
10885 append_to_statement_list (t, &list);
10886 break;
10887 case OMP_CLAUSE_FIRSTPRIVATE:
10888 decl = OMP_CLAUSE_DECL (c);
10889 if (is_variable_sized (decl))
10890 break;
10891 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
10892 if (n == NULL)
10893 break;
10894 f = (tree) n->value;
10895 if (tcctx.cb.decl_map)
10896 f = *tcctx.cb.decl_map->get (f);
10897 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
10898 if (n != NULL)
10900 sf = (tree) n->value;
10901 if (tcctx.cb.decl_map)
10902 sf = *tcctx.cb.decl_map->get (sf);
10903 src = build_simple_mem_ref_loc (loc, sarg);
10904 src = omp_build_component_ref (src, sf);
10905 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
10906 src = build_simple_mem_ref_loc (loc, src);
10908 else
10909 src = decl;
10910 dst = build_simple_mem_ref_loc (loc, arg);
10911 dst = omp_build_component_ref (dst, f);
10912 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
10913 append_to_statement_list (t, &list);
10914 break;
10915 case OMP_CLAUSE_PRIVATE:
10916 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
10917 break;
10918 decl = OMP_CLAUSE_DECL (c);
10919 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
10920 f = (tree) n->value;
10921 if (tcctx.cb.decl_map)
10922 f = *tcctx.cb.decl_map->get (f);
10923 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
10924 if (n != NULL)
10926 sf = (tree) n->value;
10927 if (tcctx.cb.decl_map)
10928 sf = *tcctx.cb.decl_map->get (sf);
10929 src = build_simple_mem_ref_loc (loc, sarg);
10930 src = omp_build_component_ref (src, sf);
10931 if (use_pointer_for_field (decl, NULL))
10932 src = build_simple_mem_ref_loc (loc, src);
10934 else
10935 src = decl;
10936 dst = build_simple_mem_ref_loc (loc, arg);
10937 dst = omp_build_component_ref (dst, f);
10938 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
10939 append_to_statement_list (t, &list);
10940 break;
10941 default:
10942 break;
10945 /* Last pass: handle VLA firstprivates. */
10946 if (tcctx.cb.decl_map)
10947 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
10948 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
10950 tree ind, ptr, df;
10952 decl = OMP_CLAUSE_DECL (c);
10953 if (!is_variable_sized (decl))
10954 continue;
10955 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
10956 if (n == NULL)
10957 continue;
10958 f = (tree) n->value;
10959 f = *tcctx.cb.decl_map->get (f);
10960 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
10961 ind = DECL_VALUE_EXPR (decl);
10962 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
10963 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
10964 n = splay_tree_lookup (ctx->sfield_map,
10965 (splay_tree_key) TREE_OPERAND (ind, 0));
10966 sf = (tree) n->value;
10967 sf = *tcctx.cb.decl_map->get (sf);
10968 src = build_simple_mem_ref_loc (loc, sarg);
10969 src = omp_build_component_ref (src, sf);
10970 src = build_simple_mem_ref_loc (loc, src);
10971 dst = build_simple_mem_ref_loc (loc, arg);
10972 dst = omp_build_component_ref (dst, f);
10973 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
10974 append_to_statement_list (t, &list);
10975 n = splay_tree_lookup (ctx->field_map,
10976 (splay_tree_key) TREE_OPERAND (ind, 0));
10977 df = (tree) n->value;
10978 df = *tcctx.cb.decl_map->get (df);
10979 ptr = build_simple_mem_ref_loc (loc, arg);
10980 ptr = omp_build_component_ref (ptr, df);
10981 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
10982 build_fold_addr_expr_loc (loc, dst));
10983 append_to_statement_list (t, &list);
10986 t = build1 (RETURN_EXPR, void_type_node, NULL);
10987 append_to_statement_list (t, &list);
10989 if (tcctx.cb.decl_map)
10990 delete tcctx.cb.decl_map;
10991 pop_gimplify_context (NULL);
10992 BIND_EXPR_BODY (bind) = list;
10993 pop_cfun ();
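/* Lower the depend clauses of task STMT into an array whose address is
   passed to the runtime via a new OMP_CLAUSE_DEPEND.  As a sketch, for

     #pragma omp task depend (out: a) depend (in: b, c)

   the array built below is roughly { 3, 1, &a, &b, &c }: the total
   number of depend addresses, then the number of out and inout entries,
   then the out and inout addresses followed by the in addresses.  */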
10996 static void
10997 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
10999 tree c, clauses;
11000 gimple g;
11001 size_t n_in = 0, n_out = 0, idx = 2, i;
11003 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
11004 OMP_CLAUSE_DEPEND);
11005 gcc_assert (clauses);
11006 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
11007 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
11008 switch (OMP_CLAUSE_DEPEND_KIND (c))
11010 case OMP_CLAUSE_DEPEND_IN:
11011 n_in++;
11012 break;
11013 case OMP_CLAUSE_DEPEND_OUT:
11014 case OMP_CLAUSE_DEPEND_INOUT:
11015 n_out++;
11016 break;
11017 default:
11018 gcc_unreachable ();
11020 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
11021 tree array = create_tmp_var (type);
11022 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
11023 NULL_TREE);
11024 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
11025 gimple_seq_add_stmt (iseq, g);
11026 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
11027 NULL_TREE);
11028 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
11029 gimple_seq_add_stmt (iseq, g);
11030 for (i = 0; i < 2; i++)
11032 if ((i ? n_in : n_out) == 0)
11033 continue;
11034 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
11035 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
11036 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
11038 tree t = OMP_CLAUSE_DECL (c);
11039 t = fold_convert (ptr_type_node, t);
11040 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
11041 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
11042 NULL_TREE, NULL_TREE);
11043 g = gimple_build_assign (r, t);
11044 gimple_seq_add_stmt (iseq, g);
11047 tree *p = gimple_omp_task_clauses_ptr (stmt);
11048 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
11049 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
11050 OMP_CLAUSE_CHAIN (c) = *p;
11051 *p = c;
11052 tree clobber = build_constructor (type, NULL);
11053 TREE_THIS_VOLATILE (clobber) = 1;
11054 g = gimple_build_assign (array, clobber);
11055 gimple_seq_add_stmt (oseq, g);
11058 /* Lower the OpenMP parallel or task directive in the current statement
11059 in GSI_P. CTX holds context information for the directive. */
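/* As a rough sketch (not from this file), for
     #pragma omp parallel shared(x)
   the statement is wrapped in a GIMPLE_BIND whose ilist fills a local
   ".omp_data_o" record with the shared data, the record is installed as
   the data argument of the outlined child function, and at the start of
   the lowered body the child's receiver pointer is set to the record's
   address.  */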
11061 static void
11062 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
11064 tree clauses;
11065 tree child_fn, t;
11066 gimple stmt = gsi_stmt (*gsi_p);
11067 gbind *par_bind, *bind, *dep_bind = NULL;
11068 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
11069 location_t loc = gimple_location (stmt);
11071 clauses = gimple_omp_taskreg_clauses (stmt);
11072 par_bind
11073 = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
11074 par_body = gimple_bind_body (par_bind);
11075 child_fn = ctx->cb.dst_fn;
11076 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
11077 && !gimple_omp_parallel_combined_p (stmt))
11079 struct walk_stmt_info wi;
11080 int ws_num = 0;
11082 memset (&wi, 0, sizeof (wi));
11083 wi.info = &ws_num;
11084 wi.val_only = true;
11085 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
11086 if (ws_num == 1)
11087 gimple_omp_parallel_set_combined_p (stmt, true);
11089 gimple_seq dep_ilist = NULL;
11090 gimple_seq dep_olist = NULL;
11091 if (gimple_code (stmt) == GIMPLE_OMP_TASK
11092 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
11094 push_gimplify_context ();
11095 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
11096 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
11099 if (ctx->srecord_type)
11100 create_task_copyfn (as_a <gomp_task *> (stmt), ctx);
11102 push_gimplify_context ();
11104 par_olist = NULL;
11105 par_ilist = NULL;
11106 par_rlist = NULL;
11107 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
11108 lower_omp (&par_body, ctx);
11109 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
11110 lower_reduction_clauses (clauses, &par_rlist, ctx);
11112 /* Declare all the variables created by mapping and the variables
11113 declared in the scope of the parallel body. */
11114 record_vars_into (ctx->block_vars, child_fn);
11115 record_vars_into (gimple_bind_vars (par_bind), child_fn);
11117 if (ctx->record_type)
11119 ctx->sender_decl
11120 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
11121 : ctx->record_type, ".omp_data_o");
11122 DECL_NAMELESS (ctx->sender_decl) = 1;
11123 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
11124 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
11127 olist = NULL;
11128 ilist = NULL;
11129 lower_send_clauses (clauses, &ilist, &olist, ctx);
11130 lower_send_shared_vars (&ilist, &olist, ctx);
11132 if (ctx->record_type)
11134 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
11135 TREE_THIS_VOLATILE (clobber) = 1;
11136 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
11137 clobber));
11140 /* Once all the expansions are done, sequence all the different
11141 fragments inside gimple_omp_body. */
11143 new_body = NULL;
11145 if (ctx->record_type)
11147 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
11148 /* fixup_child_record_type might have changed receiver_decl's type. */
11149 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
11150 gimple_seq_add_stmt (&new_body,
11151 gimple_build_assign (ctx->receiver_decl, t));
11154 gimple_seq_add_seq (&new_body, par_ilist);
11155 gimple_seq_add_seq (&new_body, par_body);
11156 gimple_seq_add_seq (&new_body, par_rlist);
11157 if (ctx->cancellable)
11158 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
11159 gimple_seq_add_seq (&new_body, par_olist);
11160 new_body = maybe_catch_exception (new_body);
11161 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
11162 gimple_omp_set_body (stmt, new_body);
11164 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
11165 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
11166 gimple_bind_add_seq (bind, ilist);
11167 gimple_bind_add_stmt (bind, stmt);
11168 gimple_bind_add_seq (bind, olist);
11170 pop_gimplify_context (NULL);
11172 if (dep_bind)
11174 gimple_bind_add_seq (dep_bind, dep_ilist);
11175 gimple_bind_add_stmt (dep_bind, bind);
11176 gimple_bind_add_seq (dep_bind, dep_olist);
11177 pop_gimplify_context (dep_bind);
11181 /* Lower the GIMPLE_OMP_TARGET in the current statement
11182 in GSI_P. CTX holds context information for the directive. */
11184 static void
11185 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
11187 tree clauses;
11188 tree child_fn, t, c;
11189 gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
11190 gbind *tgt_bind, *bind;
11191 gimple_seq tgt_body, olist, ilist, orlist, irlist, new_body;
11192 location_t loc = gimple_location (stmt);
11193 bool offloaded, data_region;
11194 unsigned int map_cnt = 0;
11196 offloaded = is_gimple_omp_offloaded (stmt);
11197 switch (gimple_omp_target_kind (stmt))
11199 case GF_OMP_TARGET_KIND_REGION:
11200 case GF_OMP_TARGET_KIND_UPDATE:
11201 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
11202 case GF_OMP_TARGET_KIND_OACC_KERNELS:
11203 case GF_OMP_TARGET_KIND_OACC_UPDATE:
11204 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
11205 data_region = false;
11206 break;
11207 case GF_OMP_TARGET_KIND_DATA:
11208 case GF_OMP_TARGET_KIND_OACC_DATA:
11209 data_region = true;
11210 break;
11211 default:
11212 gcc_unreachable ();
11215 clauses = gimple_omp_target_clauses (stmt);
11217 tgt_bind = NULL;
11218 tgt_body = NULL;
11219 if (offloaded)
11221 tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
11222 tgt_body = gimple_bind_body (tgt_bind);
11224 else if (data_region)
11225 tgt_body = gimple_omp_body (stmt);
11226 child_fn = ctx->cb.dst_fn;
11228 push_gimplify_context ();
11230 irlist = NULL;
11231 orlist = NULL;
11232 if (offloaded
11233 && is_gimple_omp_oacc (stmt))
11234 oacc_process_reduction_data (&tgt_body, &irlist, &orlist, ctx);
11236 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
11237 switch (OMP_CLAUSE_CODE (c))
11239 tree var, x;
11241 default:
11242 break;
11243 case OMP_CLAUSE_MAP:
11244 #ifdef ENABLE_CHECKING
11245 /* First check what we're prepared to handle in the following. */
11246 switch (OMP_CLAUSE_MAP_KIND (c))
11248 case GOMP_MAP_ALLOC:
11249 case GOMP_MAP_TO:
11250 case GOMP_MAP_FROM:
11251 case GOMP_MAP_TOFROM:
11252 case GOMP_MAP_POINTER:
11253 case GOMP_MAP_TO_PSET:
11254 break;
11255 case GOMP_MAP_FORCE_ALLOC:
11256 case GOMP_MAP_FORCE_TO:
11257 case GOMP_MAP_FORCE_FROM:
11258 case GOMP_MAP_FORCE_TOFROM:
11259 case GOMP_MAP_FORCE_PRESENT:
11260 case GOMP_MAP_FORCE_DEALLOC:
11261 case GOMP_MAP_FORCE_DEVICEPTR:
11262 gcc_assert (is_gimple_omp_oacc (stmt));
11263 break;
11264 default:
11265 gcc_unreachable ();
11267 #endif
11268 /* FALLTHRU */
11269 case OMP_CLAUSE_TO:
11270 case OMP_CLAUSE_FROM:
11271 var = OMP_CLAUSE_DECL (c);
11272 if (!DECL_P (var))
11274 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
11275 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
11276 map_cnt++;
11277 continue;
11280 if (DECL_SIZE (var)
11281 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
11283 tree var2 = DECL_VALUE_EXPR (var);
11284 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
11285 var2 = TREE_OPERAND (var2, 0);
11286 gcc_assert (DECL_P (var2));
11287 var = var2;
11290 if (!maybe_lookup_field (var, ctx))
11291 continue;
11293 if (offloaded)
11295 x = build_receiver_ref (var, true, ctx);
11296 tree new_var = lookup_decl (var, ctx);
11297 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
11298 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
11299 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
11300 x = build_simple_mem_ref (x);
11301 SET_DECL_VALUE_EXPR (new_var, x);
11302 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
11304 map_cnt++;
11307 if (offloaded)
11309 target_nesting_level++;
11310 lower_omp (&tgt_body, ctx);
11311 target_nesting_level--;
11313 else if (data_region)
11314 lower_omp (&tgt_body, ctx);
11316 if (offloaded)
11318 /* Declare all the variables created by mapping and the variables
11319 declared in the scope of the target body. */
11320 record_vars_into (ctx->block_vars, child_fn);
11321 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
11324 olist = NULL;
11325 ilist = NULL;
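  /* Sketch of the data argument built below for offloaded regions (not
     taken from this file): a TREE_VEC of ".omp_data_arr" (a record
     collecting host addresses of the mapped objects), ".omp_data_sizes"
     (byte size of each map entry) and ".omp_data_kinds", where each kind
     value packs the GOMP_MAP_* code in its low TALIGN_SHIFT bits and
     ceil_log2 of the alignment above them; e.g. for a 4-byte int with
     4-byte alignment mapped tofrom:
       sizes[i] = 4;  kinds[i] = GOMP_MAP_TOFROM | (2 << 3);  */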
11326 if (ctx->record_type)
11328 ctx->sender_decl
11329 = create_tmp_var (ctx->record_type, ".omp_data_arr");
11330 DECL_NAMELESS (ctx->sender_decl) = 1;
11331 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
11332 t = make_tree_vec (3);
11333 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
11334 TREE_VEC_ELT (t, 1)
11335 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
11336 ".omp_data_sizes");
11337 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
11338 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
11339 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
11340 tree tkind_type;
11341 int talign_shift;
11342 if (is_gimple_omp_oacc (stmt))
11344 tkind_type = short_unsigned_type_node;
11345 talign_shift = 8;
11347 else
11349 tkind_type = unsigned_char_type_node;
11350 talign_shift = 3;
11352 TREE_VEC_ELT (t, 2)
11353 = create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
11354 ".omp_data_kinds");
11355 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
11356 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
11357 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
11358 gimple_omp_target_set_data_arg (stmt, t);
11360 vec<constructor_elt, va_gc> *vsize;
11361 vec<constructor_elt, va_gc> *vkind;
11362 vec_alloc (vsize, map_cnt);
11363 vec_alloc (vkind, map_cnt);
11364 unsigned int map_idx = 0;
11366 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
11367 switch (OMP_CLAUSE_CODE (c))
11369 tree ovar, nc;
11371 default:
11372 break;
11373 case OMP_CLAUSE_MAP:
11374 case OMP_CLAUSE_TO:
11375 case OMP_CLAUSE_FROM:
11376 nc = c;
11377 ovar = OMP_CLAUSE_DECL (c);
11378 if (!DECL_P (ovar))
11380 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
11381 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
11383 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
11384 == get_base_address (ovar));
11385 nc = OMP_CLAUSE_CHAIN (c);
11386 ovar = OMP_CLAUSE_DECL (nc);
11388 else
11390 tree x = build_sender_ref (ovar, ctx);
11391 tree v
11392 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
11393 gimplify_assign (x, v, &ilist);
11394 nc = NULL_TREE;
11397 else
11399 if (DECL_SIZE (ovar)
11400 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
11402 tree ovar2 = DECL_VALUE_EXPR (ovar);
11403 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
11404 ovar2 = TREE_OPERAND (ovar2, 0);
11405 gcc_assert (DECL_P (ovar2));
11406 ovar = ovar2;
11408 if (!maybe_lookup_field (ovar, ctx))
11409 continue;
11412 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
11413 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
11414 talign = DECL_ALIGN_UNIT (ovar);
11415 if (nc)
11417 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
11418 tree x = build_sender_ref (ovar, ctx);
11419 if (maybe_lookup_oacc_reduction (var, ctx))
11421 gcc_checking_assert (offloaded
11422 && is_gimple_omp_oacc (stmt));
11423 gimplify_assign (x, var, &ilist);
11425 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
11426 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
11427 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
11428 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
11430 gcc_assert (offloaded);
11431 tree avar
11432 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
11433 mark_addressable (avar);
11434 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
11435 talign = DECL_ALIGN_UNIT (avar);
11436 avar = build_fold_addr_expr (avar);
11437 gimplify_assign (x, avar, &ilist);
11439 else if (is_gimple_reg (var))
11441 gcc_assert (offloaded);
11442 tree avar = create_tmp_var (TREE_TYPE (var));
11443 mark_addressable (avar);
11444 enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
11445 if (GOMP_MAP_COPY_TO_P (map_kind)
11446 || map_kind == GOMP_MAP_POINTER
11447 || map_kind == GOMP_MAP_TO_PSET
11448 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
11449 gimplify_assign (avar, var, &ilist);
11450 avar = build_fold_addr_expr (avar);
11451 gimplify_assign (x, avar, &ilist);
11452 if ((GOMP_MAP_COPY_FROM_P (map_kind)
11453 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
11454 && !TYPE_READONLY (TREE_TYPE (var)))
11456 x = build_sender_ref (ovar, ctx);
11457 x = build_simple_mem_ref (x);
11458 gimplify_assign (var, x, &olist);
11461 else
11463 var = build_fold_addr_expr (var);
11464 gimplify_assign (x, var, &ilist);
11467 tree s = OMP_CLAUSE_SIZE (c);
11468 if (s == NULL_TREE)
11469 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
11470 s = fold_convert (size_type_node, s);
11471 tree purpose = size_int (map_idx++);
11472 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
11473 if (TREE_CODE (s) != INTEGER_CST)
11474 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
11476 unsigned HOST_WIDE_INT tkind;
11477 switch (OMP_CLAUSE_CODE (c))
11479 case OMP_CLAUSE_MAP:
11480 tkind = OMP_CLAUSE_MAP_KIND (c);
11481 break;
11482 case OMP_CLAUSE_TO:
11483 tkind = GOMP_MAP_TO;
11484 break;
11485 case OMP_CLAUSE_FROM:
11486 tkind = GOMP_MAP_FROM;
11487 break;
11488 default:
11489 gcc_unreachable ();
11491 gcc_checking_assert (tkind
11492 < (HOST_WIDE_INT_C (1U) << talign_shift));
11493 talign = ceil_log2 (talign);
11494 tkind |= talign << talign_shift;
11495 gcc_checking_assert (tkind
11496 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
11497 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
11498 build_int_cstu (tkind_type, tkind));
11499 if (nc && nc != c)
11500 c = nc;
11503 gcc_assert (map_idx == map_cnt);
11505 DECL_INITIAL (TREE_VEC_ELT (t, 1))
11506 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
11507 DECL_INITIAL (TREE_VEC_ELT (t, 2))
11508 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
11509 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
11511 gimple_seq initlist = NULL;
11512 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
11513 TREE_VEC_ELT (t, 1)),
11514 &initlist, true, NULL_TREE);
11515 gimple_seq_add_seq (&ilist, initlist);
11517 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
11518 NULL);
11519 TREE_THIS_VOLATILE (clobber) = 1;
11520 gimple_seq_add_stmt (&olist,
11521 gimple_build_assign (TREE_VEC_ELT (t, 1),
11522 clobber));
11525 tree clobber = build_constructor (ctx->record_type, NULL);
11526 TREE_THIS_VOLATILE (clobber) = 1;
11527 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
11528 clobber));
11531 /* Once all the expansions are done, sequence all the different
11532 fragments inside gimple_omp_body. */
11534 new_body = NULL;
11536 if (offloaded
11537 && ctx->record_type)
11539 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
11540 /* fixup_child_record_type might have changed receiver_decl's type. */
11541 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
11542 gimple_seq_add_stmt (&new_body,
11543 gimple_build_assign (ctx->receiver_decl, t));
11546 if (offloaded)
11548 gimple_seq_add_seq (&new_body, tgt_body);
11549 new_body = maybe_catch_exception (new_body);
11551 else if (data_region)
11552 new_body = tgt_body;
11553 if (offloaded || data_region)
11555 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
11556 gimple_omp_set_body (stmt, new_body);
11559 bind = gimple_build_bind (NULL, NULL,
11560 tgt_bind ? gimple_bind_block (tgt_bind)
11561 : NULL_TREE);
11562 gsi_replace (gsi_p, bind, true);
11563 gimple_bind_add_seq (bind, irlist);
11564 gimple_bind_add_seq (bind, ilist);
11565 gimple_bind_add_stmt (bind, stmt);
11566 gimple_bind_add_seq (bind, olist);
11567 gimple_bind_add_seq (bind, orlist);
11569 pop_gimplify_context (NULL);
11572 /* Expand code for an OpenMP teams directive. */
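/* For example (a sketch, not from this file),
     #pragma omp teams num_teams (4) thread_limit (16)
   is lowered so that the enclosing bind first evaluates the clause
   expressions and emits a call to the GOMP_teams builtin,
     GOMP_teams (4, 16);
   ahead of the teams body; 0 is passed for an absent clause.  */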
11574 static void
11575 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
11577 gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
11578 push_gimplify_context ();
11580 tree block = make_node (BLOCK);
11581 gbind *bind = gimple_build_bind (NULL, NULL, block);
11582 gsi_replace (gsi_p, bind, true);
11583 gimple_seq bind_body = NULL;
11584 gimple_seq dlist = NULL;
11585 gimple_seq olist = NULL;
11587 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
11588 OMP_CLAUSE_NUM_TEAMS);
11589 if (num_teams == NULL_TREE)
11590 num_teams = build_int_cst (unsigned_type_node, 0);
11591 else
11593 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
11594 num_teams = fold_convert (unsigned_type_node, num_teams);
11595 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
11597 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
11598 OMP_CLAUSE_THREAD_LIMIT);
11599 if (thread_limit == NULL_TREE)
11600 thread_limit = build_int_cst (unsigned_type_node, 0);
11601 else
11603 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
11604 thread_limit = fold_convert (unsigned_type_node, thread_limit);
11605 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
11606 fb_rvalue);
11609 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
11610 &bind_body, &dlist, ctx, NULL);
11611 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
11612 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
11613 gimple_seq_add_stmt (&bind_body, teams_stmt);
11615 location_t loc = gimple_location (teams_stmt);
11616 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
11617 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
11618 gimple_set_location (call, loc);
11619 gimple_seq_add_stmt (&bind_body, call);
11621 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
11622 gimple_omp_set_body (teams_stmt, NULL);
11623 gimple_seq_add_seq (&bind_body, olist);
11624 gimple_seq_add_seq (&bind_body, dlist);
11625 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
11626 gimple_bind_set_body (bind, bind_body);
11628 pop_gimplify_context (bind);
11630 gimple_bind_append_vars (bind, ctx->block_vars);
11631 BLOCK_VARS (block) = ctx->block_vars;
11632 if (BLOCK_VARS (block))
11633 TREE_USED (block) = 1;
11637 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
11638 regimplified. If DATA is non-NULL, lower_omp_1 is outside
11639 of OMP context, but with task_shared_vars set. */
11641 static tree
11642 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
11643 void *data)
11645 tree t = *tp;
11647 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
11648 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
11649 return t;
11651 if (task_shared_vars
11652 && DECL_P (t)
11653 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
11654 return t;
11656 /* If a global variable has been privatized, TREE_CONSTANT on
11657 ADDR_EXPR might be wrong. */
11658 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
11659 recompute_tree_invariant_for_addr_expr (t);
11661 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
11662 return NULL_TREE;
11665 static void
11666 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
11668 gimple stmt = gsi_stmt (*gsi_p);
11669 struct walk_stmt_info wi;
11670 gcall *call_stmt;
11672 if (gimple_has_location (stmt))
11673 input_location = gimple_location (stmt);
11675 if (task_shared_vars)
11676 memset (&wi, '\0', sizeof (wi));
11678 /* If we have issued syntax errors, avoid doing any heavy lifting.
11679 Just replace the OMP directives with a NOP to avoid
11680 confusing RTL expansion. */
11681 if (seen_error () && is_gimple_omp (stmt))
11683 gsi_replace (gsi_p, gimple_build_nop (), true);
11684 return;
11687 switch (gimple_code (stmt))
11689 case GIMPLE_COND:
11691 gcond *cond_stmt = as_a <gcond *> (stmt);
11692 if ((ctx || task_shared_vars)
11693 && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
11694 lower_omp_regimplify_p,
11695 ctx ? NULL : &wi, NULL)
11696 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
11697 lower_omp_regimplify_p,
11698 ctx ? NULL : &wi, NULL)))
11699 gimple_regimplify_operands (cond_stmt, gsi_p);
11701 break;
11702 case GIMPLE_CATCH:
11703 lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
11704 break;
11705 case GIMPLE_EH_FILTER:
11706 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
11707 break;
11708 case GIMPLE_TRY:
11709 lower_omp (gimple_try_eval_ptr (stmt), ctx);
11710 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
11711 break;
11712 case GIMPLE_TRANSACTION:
11713 lower_omp (gimple_transaction_body_ptr (
11714 as_a <gtransaction *> (stmt)),
11715 ctx);
11716 break;
11717 case GIMPLE_BIND:
11718 lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
11719 break;
11720 case GIMPLE_OMP_PARALLEL:
11721 case GIMPLE_OMP_TASK:
11722 ctx = maybe_lookup_ctx (stmt);
11723 gcc_assert (ctx);
11724 if (ctx->cancellable)
11725 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
11726 lower_omp_taskreg (gsi_p, ctx);
11727 break;
11728 case GIMPLE_OMP_FOR:
11729 ctx = maybe_lookup_ctx (stmt);
11730 gcc_assert (ctx);
11731 if (ctx->cancellable)
11732 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
11733 lower_omp_for (gsi_p, ctx);
11734 break;
11735 case GIMPLE_OMP_SECTIONS:
11736 ctx = maybe_lookup_ctx (stmt);
11737 gcc_assert (ctx);
11738 if (ctx->cancellable)
11739 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
11740 lower_omp_sections (gsi_p, ctx);
11741 break;
11742 case GIMPLE_OMP_SINGLE:
11743 ctx = maybe_lookup_ctx (stmt);
11744 gcc_assert (ctx);
11745 lower_omp_single (gsi_p, ctx);
11746 break;
11747 case GIMPLE_OMP_MASTER:
11748 ctx = maybe_lookup_ctx (stmt);
11749 gcc_assert (ctx);
11750 lower_omp_master (gsi_p, ctx);
11751 break;
11752 case GIMPLE_OMP_TASKGROUP:
11753 ctx = maybe_lookup_ctx (stmt);
11754 gcc_assert (ctx);
11755 lower_omp_taskgroup (gsi_p, ctx);
11756 break;
11757 case GIMPLE_OMP_ORDERED:
11758 ctx = maybe_lookup_ctx (stmt);
11759 gcc_assert (ctx);
11760 lower_omp_ordered (gsi_p, ctx);
11761 break;
11762 case GIMPLE_OMP_CRITICAL:
11763 ctx = maybe_lookup_ctx (stmt);
11764 gcc_assert (ctx);
11765 lower_omp_critical (gsi_p, ctx);
11766 break;
11767 case GIMPLE_OMP_ATOMIC_LOAD:
11768 if ((ctx || task_shared_vars)
11769 && walk_tree (gimple_omp_atomic_load_rhs_ptr (
11770 as_a <gomp_atomic_load *> (stmt)),
11771 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
11772 gimple_regimplify_operands (stmt, gsi_p);
11773 break;
11774 case GIMPLE_OMP_TARGET:
11775 ctx = maybe_lookup_ctx (stmt);
11776 gcc_assert (ctx);
11777 lower_omp_target (gsi_p, ctx);
11778 break;
11779 case GIMPLE_OMP_TEAMS:
11780 ctx = maybe_lookup_ctx (stmt);
11781 gcc_assert (ctx);
11782 lower_omp_teams (gsi_p, ctx);
11783 break;
11784 case GIMPLE_CALL:
11785 tree fndecl;
11786 call_stmt = as_a <gcall *> (stmt);
11787 fndecl = gimple_call_fndecl (call_stmt);
11788 if (fndecl
11789 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
11790 switch (DECL_FUNCTION_CODE (fndecl))
11792 case BUILT_IN_GOMP_BARRIER:
11793 if (ctx == NULL)
11794 break;
11795 /* FALLTHRU */
11796 case BUILT_IN_GOMP_CANCEL:
11797 case BUILT_IN_GOMP_CANCELLATION_POINT:
11798 omp_context *cctx;
11799 cctx = ctx;
11800 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
11801 cctx = cctx->outer;
11802 gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
11803 if (!cctx->cancellable)
11805 if (DECL_FUNCTION_CODE (fndecl)
11806 == BUILT_IN_GOMP_CANCELLATION_POINT)
11808 stmt = gimple_build_nop ();
11809 gsi_replace (gsi_p, stmt, false);
11811 break;
11813 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
11815 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
11816 gimple_call_set_fndecl (call_stmt, fndecl);
11817 gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
11819 tree lhs;
11820 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
11821 gimple_call_set_lhs (call_stmt, lhs);
11822 tree fallthru_label;
11823 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
11824 gimple g;
11825 g = gimple_build_label (fallthru_label);
11826 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
11827 g = gimple_build_cond (NE_EXPR, lhs,
11828 fold_convert (TREE_TYPE (lhs),
11829 boolean_false_node),
11830 cctx->cancel_label, fallthru_label);
11831 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
11832 break;
11833 default:
11834 break;
11836 /* FALLTHRU */
11837 default:
11838 if ((ctx || task_shared_vars)
11839 && walk_gimple_op (stmt, lower_omp_regimplify_p,
11840 ctx ? NULL : &wi))
11842 /* Just remove clobbers; this should happen only if we have
11843 "privatized" local addressable variables in SIMD regions.
11844 The clobber isn't needed in that case, and gimplifying the
11845 address of the ARRAY_REF into a pointer and creating a
11846 MEM_REF based clobber would create worse code than we get
11847 with the clobber dropped. */
11848 if (gimple_clobber_p (stmt))
11850 gsi_replace (gsi_p, gimple_build_nop (), true);
11851 break;
11853 gimple_regimplify_operands (stmt, gsi_p);
11855 break;
11859 static void
11860 lower_omp (gimple_seq *body, omp_context *ctx)
11862 location_t saved_location = input_location;
11863 gimple_stmt_iterator gsi;
11864 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
11865 lower_omp_1 (&gsi, ctx);
11866 /* During gimplification, we haven't folded statements inside offloading
11867 regions (gimplify.c:maybe_fold_stmt); do that now. */
11868 if (target_nesting_level)
11869 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
11870 fold_stmt (&gsi);
11871 input_location = saved_location;
11874 /* Main entry point. */
11876 static unsigned int
11877 execute_lower_omp (void)
11879 gimple_seq body;
11880 int i;
11881 omp_context *ctx;
11883 /* This pass always runs, to provide PROP_gimple_lomp.
11884 But often, there is nothing to do. */
11885 if (flag_cilkplus == 0 && flag_openacc == 0 && flag_openmp == 0
11886 && flag_openmp_simd == 0)
11887 return 0;
11889 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
11890 delete_omp_context);
11892 body = gimple_body (current_function_decl);
11893 scan_omp (&body, NULL);
11894 gcc_assert (taskreg_nesting_level == 0);
11895 FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
11896 finish_taskreg_scan (ctx);
11897 taskreg_contexts.release ();
11899 if (all_contexts->root)
11901 if (task_shared_vars)
11902 push_gimplify_context ();
11903 lower_omp (&body, NULL);
11904 if (task_shared_vars)
11905 pop_gimplify_context (NULL);
11908 if (all_contexts)
11910 splay_tree_delete (all_contexts);
11911 all_contexts = NULL;
11913 BITMAP_FREE (task_shared_vars);
11914 return 0;
11917 namespace {
11919 const pass_data pass_data_lower_omp =
11921 GIMPLE_PASS, /* type */
11922 "omplower", /* name */
11923 OPTGROUP_NONE, /* optinfo_flags */
11924 TV_NONE, /* tv_id */
11925 PROP_gimple_any, /* properties_required */
11926 PROP_gimple_lomp, /* properties_provided */
11927 0, /* properties_destroyed */
11928 0, /* todo_flags_start */
11929 0, /* todo_flags_finish */
11932 class pass_lower_omp : public gimple_opt_pass
11934 public:
11935 pass_lower_omp (gcc::context *ctxt)
11936 : gimple_opt_pass (pass_data_lower_omp, ctxt)
11939 /* opt_pass methods: */
11940 virtual unsigned int execute (function *) { return execute_lower_omp (); }
11942 }; // class pass_lower_omp
11944 } // anon namespace
11946 gimple_opt_pass *
11947 make_pass_lower_omp (gcc::context *ctxt)
11949 return new pass_lower_omp (ctxt);
11952 /* The following is a utility to diagnose structured block violations.
11953 It is not part of the "omplower" pass, as that's invoked too late. It
11954 should be invoked by the respective front ends after gimplification. */
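/* A sketch of what gets diagnosed (not taken from this file):
     #pragma omp parallel
     {
       goto out;   // error: invalid branch to/from OpenMP structured block
     }
   out:;
*/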
11956 static splay_tree all_labels;
11958 /* Check for mismatched contexts and generate an error if needed. Return
11959 true if an error is detected. */
11961 static bool
11962 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
11963 gimple branch_ctx, gimple label_ctx)
11965 gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
11966 gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));
11968 if (label_ctx == branch_ctx)
11969 return false;
11971 const char* kind = NULL;
11973 if (flag_cilkplus)
11975 if ((branch_ctx
11976 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
11977 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
11978 || (label_ctx
11979 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
11980 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
11981 kind = "Cilk Plus";
11983 if (flag_openacc)
11985 if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
11986 || (label_ctx && is_gimple_omp_oacc (label_ctx)))
11988 gcc_checking_assert (kind == NULL);
11989 kind = "OpenACC";
11992 if (kind == NULL)
11994 gcc_checking_assert (flag_openmp);
11995 kind = "OpenMP";
11999 Previously we kept track of the label's entire context in diagnose_sb_[12]
12000 so we could traverse it and issue a correct "exit" or "enter" error
12001 message upon a structured block violation.
12003 We built the context as a list via tree_cons'ing, but there is
12004 no easy counterpart in gimple tuples. It seems like far too much work
12005 for issuing exit/enter error messages. If someone really misses the
12006 distinct error message... patches welcome.
12009 #if 0
12010 /* Try to avoid confusing the user by producing an error message
12011 with correct "exit" or "enter" verbiage. We prefer "exit"
12012 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
12013 if (branch_ctx == NULL)
12014 exit_p = false;
12015 else
12017 while (label_ctx)
12019 if (TREE_VALUE (label_ctx) == branch_ctx)
12021 exit_p = false;
12022 break;
12024 label_ctx = TREE_CHAIN (label_ctx);
12028 if (exit_p)
12029 error ("invalid exit from %s structured block", kind);
12030 else
12031 error ("invalid entry to %s structured block", kind);
12032 #endif
12034 /* If it's obvious we have an invalid entry, be specific about the error. */
12035 if (branch_ctx == NULL)
12036 error ("invalid entry to %s structured block", kind);
12037 else
12039 /* Otherwise, be vague and lazy, but efficient. */
12040 error ("invalid branch to/from %s structured block", kind);
12043 gsi_replace (gsi_p, gimple_build_nop (), false);
12044 return true;
12047 /* Pass 1: Create a minimal tree of structured blocks, and record
12048 where each label is found. */
12050 static tree
12051 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
12052 struct walk_stmt_info *wi)
12054 gimple context = (gimple) wi->info;
12055 gimple inner_context;
12056 gimple stmt = gsi_stmt (*gsi_p);
12058 *handled_ops_p = true;
12060 switch (gimple_code (stmt))
12062 WALK_SUBSTMTS;
12064 case GIMPLE_OMP_PARALLEL:
12065 case GIMPLE_OMP_TASK:
12066 case GIMPLE_OMP_SECTIONS:
12067 case GIMPLE_OMP_SINGLE:
12068 case GIMPLE_OMP_SECTION:
12069 case GIMPLE_OMP_MASTER:
12070 case GIMPLE_OMP_ORDERED:
12071 case GIMPLE_OMP_CRITICAL:
12072 case GIMPLE_OMP_TARGET:
12073 case GIMPLE_OMP_TEAMS:
12074 case GIMPLE_OMP_TASKGROUP:
12075 /* The minimal context here is just the current OMP construct. */
12076 inner_context = stmt;
12077 wi->info = inner_context;
12078 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
12079 wi->info = context;
12080 break;
12082 case GIMPLE_OMP_FOR:
12083 inner_context = stmt;
12084 wi->info = inner_context;
12085 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
12086 walk them. */
12087 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
12088 diagnose_sb_1, NULL, wi);
12089 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
12090 wi->info = context;
12091 break;
12093 case GIMPLE_LABEL:
12094 splay_tree_insert (all_labels,
12095 (splay_tree_key) gimple_label_label (
12096 as_a <glabel *> (stmt)),
12097 (splay_tree_value) context);
12098 break;
12100 default:
12101 break;
12104 return NULL_TREE;
12107 /* Pass 2: Check each branch and see if its context differs from that of
12108 the destination label's context. */
12110 static tree
12111 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
12112 struct walk_stmt_info *wi)
12114 gimple context = (gimple) wi->info;
12115 splay_tree_node n;
12116 gimple stmt = gsi_stmt (*gsi_p);
12118 *handled_ops_p = true;
12120 switch (gimple_code (stmt))
12122 WALK_SUBSTMTS;
12124 case GIMPLE_OMP_PARALLEL:
12125 case GIMPLE_OMP_TASK:
12126 case GIMPLE_OMP_SECTIONS:
12127 case GIMPLE_OMP_SINGLE:
12128 case GIMPLE_OMP_SECTION:
12129 case GIMPLE_OMP_MASTER:
12130 case GIMPLE_OMP_ORDERED:
12131 case GIMPLE_OMP_CRITICAL:
12132 case GIMPLE_OMP_TARGET:
12133 case GIMPLE_OMP_TEAMS:
12134 case GIMPLE_OMP_TASKGROUP:
12135 wi->info = stmt;
12136 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
12137 wi->info = context;
12138 break;
12140 case GIMPLE_OMP_FOR:
12141 wi->info = stmt;
12142 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
12143 walk them. */
12144 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
12145 diagnose_sb_2, NULL, wi);
12146 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
12147 wi->info = context;
12148 break;
12150 case GIMPLE_COND:
12152 gcond *cond_stmt = as_a <gcond *> (stmt);
12153 tree lab = gimple_cond_true_label (cond_stmt);
12154 if (lab)
12156 n = splay_tree_lookup (all_labels,
12157 (splay_tree_key) lab);
12158 diagnose_sb_0 (gsi_p, context,
12159 n ? (gimple) n->value : NULL);
12161 lab = gimple_cond_false_label (cond_stmt);
12162 if (lab)
12164 n = splay_tree_lookup (all_labels,
12165 (splay_tree_key) lab);
12166 diagnose_sb_0 (gsi_p, context,
12167 n ? (gimple) n->value : NULL);
12170 break;
12172 case GIMPLE_GOTO:
12174 tree lab = gimple_goto_dest (stmt);
12175 if (TREE_CODE (lab) != LABEL_DECL)
12176 break;
12178 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
12179 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
12181 break;
12183 case GIMPLE_SWITCH:
12185 gswitch *switch_stmt = as_a <gswitch *> (stmt);
12186 unsigned int i;
12187 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
12189 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
12190 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
12191 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
12192 break;
12195 break;
12197 case GIMPLE_RETURN:
12198 diagnose_sb_0 (gsi_p, context, NULL);
12199 break;
12201 default:
12202 break;
12205 return NULL_TREE;
12208 /* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
12209 GIMPLE_* codes. */
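/* For instance (a sketch), a GIMPLE_OMP_FOR opens a region at its block;
   the matching GIMPLE_OMP_CONTINUE block gets an abnormal loopback edge
   to the loop entry's successor, plus abnormal edges that bypass the
   body for the zero-iterations case, and GIMPLE_OMP_RETURN closes the
   region.  */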
12210 bool
12211 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
12212 int *region_idx)
12214 gimple last = last_stmt (bb);
12215 enum gimple_code code = gimple_code (last);
12216 struct omp_region *cur_region = *region;
12217 bool fallthru = false;
12219 switch (code)
12221 case GIMPLE_OMP_PARALLEL:
12222 case GIMPLE_OMP_TASK:
12223 case GIMPLE_OMP_FOR:
12224 case GIMPLE_OMP_SINGLE:
12225 case GIMPLE_OMP_TEAMS:
12226 case GIMPLE_OMP_MASTER:
12227 case GIMPLE_OMP_TASKGROUP:
12228 case GIMPLE_OMP_ORDERED:
12229 case GIMPLE_OMP_CRITICAL:
12230 case GIMPLE_OMP_SECTION:
12231 cur_region = new_omp_region (bb, code, cur_region);
12232 fallthru = true;
12233 break;
12235 case GIMPLE_OMP_TARGET:
12236 cur_region = new_omp_region (bb, code, cur_region);
12237 fallthru = true;
12238 switch (gimple_omp_target_kind (last))
12240 case GF_OMP_TARGET_KIND_REGION:
12241 case GF_OMP_TARGET_KIND_DATA:
12242 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
12243 case GF_OMP_TARGET_KIND_OACC_KERNELS:
12244 case GF_OMP_TARGET_KIND_OACC_DATA:
12245 break;
12246 case GF_OMP_TARGET_KIND_UPDATE:
12247 case GF_OMP_TARGET_KIND_OACC_UPDATE:
12248 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
12249 cur_region = cur_region->outer;
12250 break;
12251 default:
12252 gcc_unreachable ();
12254 break;
12256 case GIMPLE_OMP_SECTIONS:
12257 cur_region = new_omp_region (bb, code, cur_region);
12258 fallthru = true;
12259 break;
12261 case GIMPLE_OMP_SECTIONS_SWITCH:
12262 fallthru = false;
12263 break;
12265 case GIMPLE_OMP_ATOMIC_LOAD:
12266 case GIMPLE_OMP_ATOMIC_STORE:
12267 fallthru = true;
12268 break;
12270 case GIMPLE_OMP_RETURN:
12271 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
12272 somewhere other than the next block. This will be
12273 created later. */
12274 cur_region->exit = bb;
12275 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
12276 cur_region = cur_region->outer;
12277 break;
12279 case GIMPLE_OMP_CONTINUE:
12280 cur_region->cont = bb;
12281 switch (cur_region->type)
12283 case GIMPLE_OMP_FOR:
12284 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
12285 succs edges as abnormal to prevent splitting
12286 them. */
12287 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
12288 /* Make the loopback edge. */
12289 make_edge (bb, single_succ (cur_region->entry),
12290 EDGE_ABNORMAL);
12292 /* Create an edge from GIMPLE_OMP_FOR to exit, which
12293 corresponds to the case that the body of the loop
12294 is not executed at all. */
12295 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
12296 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
12297 fallthru = false;
12298 break;
12300 case GIMPLE_OMP_SECTIONS:
12301 /* Wire up the edges into and out of the nested sections. */
12303 basic_block switch_bb = single_succ (cur_region->entry);
12305 struct omp_region *i;
12306 for (i = cur_region->inner; i ; i = i->next)
12308 gcc_assert (i->type == GIMPLE_OMP_SECTION);
12309 make_edge (switch_bb, i->entry, 0);
12310 make_edge (i->exit, bb, EDGE_FALLTHRU);
12313 /* Make the loopback edge to the block with
12314 GIMPLE_OMP_SECTIONS_SWITCH. */
12315 make_edge (bb, switch_bb, 0);
12317 /* Make the edge from the switch to exit. */
12318 make_edge (switch_bb, bb->next_bb, 0);
12319 fallthru = false;
12321 break;
12323 default:
12324 gcc_unreachable ();
12326 break;
12328 default:
12329 gcc_unreachable ();
12332 if (*region != cur_region)
12334 *region = cur_region;
12335 if (cur_region)
12336 *region_idx = cur_region->entry->index;
12337 else
12338 *region_idx = 0;
12341 return fallthru;
12344 static unsigned int
12345 diagnose_omp_structured_block_errors (void)
12347 struct walk_stmt_info wi;
12348 gimple_seq body = gimple_body (current_function_decl);
12350 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
12352 memset (&wi, 0, sizeof (wi));
12353 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
12355 memset (&wi, 0, sizeof (wi));
12356 wi.want_locations = true;
12357 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
12359 gimple_set_body (current_function_decl, body);
12361 splay_tree_delete (all_labels);
12362 all_labels = NULL;
12364 return 0;
12367 namespace {
12369 const pass_data pass_data_diagnose_omp_blocks =
12371 GIMPLE_PASS, /* type */
12372 "*diagnose_omp_blocks", /* name */
12373 OPTGROUP_NONE, /* optinfo_flags */
12374 TV_NONE, /* tv_id */
12375 PROP_gimple_any, /* properties_required */
12376 0, /* properties_provided */
12377 0, /* properties_destroyed */
12378 0, /* todo_flags_start */
12379 0, /* todo_flags_finish */
12382 class pass_diagnose_omp_blocks : public gimple_opt_pass
12384 public:
12385 pass_diagnose_omp_blocks (gcc::context *ctxt)
12386 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
12389 /* opt_pass methods: */
12390 virtual bool gate (function *)
12392 return flag_cilkplus || flag_openacc || flag_openmp;
12394 virtual unsigned int execute (function *)
12396 return diagnose_omp_structured_block_errors ();
12399 }; // class pass_diagnose_omp_blocks
12401 } // anon namespace
12403 gimple_opt_pass *
12404 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
12406 return new pass_diagnose_omp_blocks (ctxt);
12409 /* SIMD clone supporting code. */
12411 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
12412 of arguments to reserve space for. */
12414 static struct cgraph_simd_clone *
12415 simd_clone_struct_alloc (int nargs)
12417 struct cgraph_simd_clone *clone_info;
12418 size_t len = (sizeof (struct cgraph_simd_clone)
12419 + nargs * sizeof (struct cgraph_simd_clone_arg));
12420 clone_info = (struct cgraph_simd_clone *)
12421 ggc_internal_cleared_alloc (len);
12422 return clone_info;
12425 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
12427 static inline void
12428 simd_clone_struct_copy (struct cgraph_simd_clone *to,
12429 struct cgraph_simd_clone *from)
12431 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
12432 + ((from->nargs - from->inbranch)
12433 * sizeof (struct cgraph_simd_clone_arg))));
12436 /* Return a vector of parameter types of function FNDECL. This uses
12437 TYPE_ARG_TYPES if available, otherwise falls back to the types of
12438 the DECL_ARGUMENTS chain. */
12440 vec<tree>
12441 simd_clone_vector_of_formal_parm_types (tree fndecl)
12443 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
12444 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
12445 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
12446 unsigned int i;
12447 tree arg;
12448 FOR_EACH_VEC_ELT (args, i, arg)
12449 args[i] = TREE_TYPE (args[i]);
12450 return args;
12453 /* Given a simd function in NODE, extract the simd specific
12454 information from the OMP clauses passed in CLAUSES, and return
12455 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
12456 is set to TRUE if the `inbranch' or `notinbranch' clause is specified,
12457 otherwise set to FALSE. */
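/* Sketch (not taken from this file): for
     #pragma omp declare simd simdlen(8) uniform(a) linear(b:4)
   this records simdlen 8, args[0].arg_type as SIMD_CLONE_ARG_TYPE_UNIFORM
   and args[1] as SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with
   linear_step 4; a NULL return (e.g. for a zero or over-large linear
   step) means the function should not be cloned.  */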
12459 static struct cgraph_simd_clone *
12460 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
12461 bool *inbranch_specified)
12463 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
12464 tree t;
12465 int n;
12466 *inbranch_specified = false;
12468 n = args.length ();
12469 if (n > 0 && args.last () == void_type_node)
12470 n--;
12472 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
12473 be cloned have a distinctive artificial attribute ("cilk simd
12474 function") in addition to "omp declare simd". */
12475 bool cilk_clone
12476 = (flag_cilkplus
12477 && lookup_attribute ("cilk simd function",
12478 DECL_ATTRIBUTES (node->decl)));
12480 /* Allocate one more than needed just in case this is an in-branch
12481 clone which will require a mask argument. */
12482 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
12483 clone_info->nargs = n;
12484 clone_info->cilk_elemental = cilk_clone;
12486 if (!clauses)
12488 args.release ();
12489 return clone_info;
12491 clauses = TREE_VALUE (clauses);
12492 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
12493 return clone_info;
12495 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
12497 switch (OMP_CLAUSE_CODE (t))
12499 case OMP_CLAUSE_INBRANCH:
12500 clone_info->inbranch = 1;
12501 *inbranch_specified = true;
12502 break;
12503 case OMP_CLAUSE_NOTINBRANCH:
12504 clone_info->inbranch = 0;
12505 *inbranch_specified = true;
12506 break;
12507 case OMP_CLAUSE_SIMDLEN:
12508 clone_info->simdlen
12509 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
12510 break;
12511 case OMP_CLAUSE_LINEAR:
12513 tree decl = OMP_CLAUSE_DECL (t);
12514 tree step = OMP_CLAUSE_LINEAR_STEP (t);
12515 int argno = TREE_INT_CST_LOW (decl);
12516 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
12518 clone_info->args[argno].arg_type
12519 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
12520 clone_info->args[argno].linear_step = tree_to_shwi (step);
12521 gcc_assert (clone_info->args[argno].linear_step >= 0
12522 && clone_info->args[argno].linear_step < n);
12524 else
12526 if (POINTER_TYPE_P (args[argno]))
12527 step = fold_convert (ssizetype, step);
12528 if (!tree_fits_shwi_p (step))
12530 warning_at (OMP_CLAUSE_LOCATION (t), 0,
12531 "ignoring large linear step");
12532 args.release ();
12533 return NULL;
12535 else if (integer_zerop (step))
12537 warning_at (OMP_CLAUSE_LOCATION (t), 0,
12538 "ignoring zero linear step");
12539 args.release ();
12540 return NULL;
12542 else
12544 clone_info->args[argno].arg_type
12545 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
12546 clone_info->args[argno].linear_step = tree_to_shwi (step);
12549 break;
12551 case OMP_CLAUSE_UNIFORM:
12553 tree decl = OMP_CLAUSE_DECL (t);
12554 int argno = tree_to_uhwi (decl);
12555 clone_info->args[argno].arg_type
12556 = SIMD_CLONE_ARG_TYPE_UNIFORM;
12557 break;
12559 case OMP_CLAUSE_ALIGNED:
12561 tree decl = OMP_CLAUSE_DECL (t);
12562 int argno = tree_to_uhwi (decl);
12563 clone_info->args[argno].alignment
12564 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
12565 break;
12567 default:
12568 break;
12571 args.release ();
12572 return clone_info;
12575 /* Given a SIMD clone in NODE, calculate the characteristic data
12576 type and return the corresponding type. The characteristic data
12577 type is computed as described in the Intel Vector ABI. */
12579 static tree
12580 simd_clone_compute_base_data_type (struct cgraph_node *node,
12581 struct cgraph_simd_clone *clone_info)
12583 tree type = integer_type_node;
12584 tree fndecl = node->decl;
12586 /* a) For non-void function, the characteristic data type is the
12587 return type. */
12588 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
12589 type = TREE_TYPE (TREE_TYPE (fndecl));
12591 /* b) If the function has any non-uniform, non-linear parameters,
12592 then the characteristic data type is the type of the first
12593 such parameter. */
12594 else
12596 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
12597 for (unsigned int i = 0; i < clone_info->nargs; ++i)
12598 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
12600 type = map[i];
12601 break;
12603 map.release ();
12606 /* c) If the characteristic data type determined by a) or b) above
12607 is struct, union, or class type which is pass-by-value (except
12608 for the type that maps to the built-in complex data type), the
12609 characteristic data type is int. */
12610 if (RECORD_OR_UNION_TYPE_P (type)
12611 && !aggregate_value_p (type, NULL)
12612 && TREE_CODE (type) != COMPLEX_TYPE)
12613 return integer_type_node;
12615 /* d) If none of the above three classes is applicable, the
12616 characteristic data type is int. */
12618 return type;
12620 /* e) For Intel Xeon Phi native and offload compilation, if the
12621 resulting characteristic data type is 8-bit or 16-bit integer
12622 data type, the characteristic data type is int. */
12623 /* Well, we don't handle Xeon Phi yet. */
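/* Build the mangled name of a SIMD clone following the Intel Vector ABI:
   "_ZGV" + ISA letter + 'M'/'N' (masked or not) + simdlen + one code per
   argument ('u' uniform, 'l'/'n' linear with positive/negative step,
   's' variable step, 'v' vector, with 'a'<align> appended when aligned),
   then '_' and the base assembler name.  For example (a sketch; the ISA
   letter is target-dependent),
     #pragma omp declare simd simdlen(4) uniform(a) linear(b) notinbranch
     int foo (int *a, int b, int c);
   might yield "_ZGVxN4ulv_foo".  Returns NULL_TREE if a clone with the
   same mangled name already exists.  */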
12626 static tree
12627 simd_clone_mangle (struct cgraph_node *node,
12628 struct cgraph_simd_clone *clone_info)
12630 char vecsize_mangle = clone_info->vecsize_mangle;
12631 char mask = clone_info->inbranch ? 'M' : 'N';
12632 unsigned int simdlen = clone_info->simdlen;
12633 unsigned int n;
12634 pretty_printer pp;
12636 gcc_assert (vecsize_mangle && simdlen);
12638 pp_string (&pp, "_ZGV");
12639 pp_character (&pp, vecsize_mangle);
12640 pp_character (&pp, mask);
12641 pp_decimal_int (&pp, simdlen);
12643 for (n = 0; n < clone_info->nargs; ++n)
12645 struct cgraph_simd_clone_arg arg = clone_info->args[n];
12647 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
12648 pp_character (&pp, 'u');
12649 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
12651 gcc_assert (arg.linear_step != 0);
12652 pp_character (&pp, 'l');
12653 if (arg.linear_step > 1)
12654 pp_unsigned_wide_integer (&pp, arg.linear_step);
12655 else if (arg.linear_step < 0)
12657 pp_character (&pp, 'n');
12658 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
12659 arg.linear_step));
12662 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
12664 pp_character (&pp, 's');
12665 pp_unsigned_wide_integer (&pp, arg.linear_step);
12667 else
12668 pp_character (&pp, 'v');
12669 if (arg.alignment)
12671 pp_character (&pp, 'a');
12672 pp_decimal_int (&pp, arg.alignment);
12676 pp_underscore (&pp);
12677 const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
12678 if (*str == '*')
12679 ++str;
12680 pp_string (&pp, str);
12681 str = pp_formatted_text (&pp);
12683 /* If there already is a SIMD clone with the same mangled name, don't
12684 add another one. This can happen e.g. for
12685 #pragma omp declare simd
12686 #pragma omp declare simd simdlen(8)
12687 int foo (int, int);
12688 if the simdlen is assumed to be 8 for the first one, etc. */
12689 for (struct cgraph_node *clone = node->simd_clones; clone;
12690 clone = clone->simdclone->next_clone)
12691 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
12692 str) == 0)
12693 return NULL_TREE;
12695 return get_identifier (str);
12698 /* Create a simd clone of OLD_NODE and return it. */
12700 static struct cgraph_node *
12701 simd_clone_create (struct cgraph_node *old_node)
12703 struct cgraph_node *new_node;
12704 if (old_node->definition)
12706 if (!old_node->has_gimple_body_p ())
12707 return NULL;
12708 old_node->get_body ();
12709 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
12710 false, NULL, NULL,
12711 "simdclone");
12713 else
12715 tree old_decl = old_node->decl;
12716 tree new_decl = copy_node (old_node->decl);
12717 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
12718 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
12719 SET_DECL_RTL (new_decl, NULL);
12720 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
12721 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
12722 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
12723 symtab->call_cgraph_insertion_hooks (new_node);
12725 if (new_node == NULL)
12726 return new_node;
12728 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
12730 /* The function cgraph_function_versioning () will force the new
12731 symbol local. Undo this, and inherit external visibility from
12732 the old node. */
12733 new_node->local.local = old_node->local.local;
12734 new_node->externally_visible = old_node->externally_visible;
12736 return new_node;
12739 /* Adjust the return type of the given function to its appropriate
12740 vector counterpart. Returns a simd array to be used throughout the
12741 function as a return value. */
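/* E.g. (a sketch, assuming a vecsize_float of 256 bits and simdlen 4),
   "double f (double)" gets the return type "vector(4) double"; if only
   two lanes fit in a vector, the return type becomes an array of two
   "vector(2) double" and results are stored through the returned simd
   array.  */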
12743 static tree
12744 simd_clone_adjust_return_type (struct cgraph_node *node)
12746 tree fndecl = node->decl;
12747 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
12748 unsigned int veclen;
12749 tree t;
12751 /* Adjust the function return type. */
12752 if (orig_rettype == void_type_node)
12753 return NULL_TREE;
12754 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
12755 t = TREE_TYPE (TREE_TYPE (fndecl));
12756 if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
12757 veclen = node->simdclone->vecsize_int;
12758 else
12759 veclen = node->simdclone->vecsize_float;
12760 veclen /= GET_MODE_BITSIZE (TYPE_MODE (t));
12761 if (veclen > node->simdclone->simdlen)
12762 veclen = node->simdclone->simdlen;
12763 if (POINTER_TYPE_P (t))
12764 t = pointer_sized_int_node;
12765 if (veclen == node->simdclone->simdlen)
12766 t = build_vector_type (t, node->simdclone->simdlen);
12767 else
12769 t = build_vector_type (t, veclen);
12770 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
12772 TREE_TYPE (TREE_TYPE (fndecl)) = t;
12773 if (!node->definition)
12774 return NULL_TREE;
12776 t = DECL_RESULT (fndecl);
12777 /* Adjust the DECL_RESULT. */
12778 gcc_assert (TREE_TYPE (t) != void_type_node);
12779 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
12780 relayout_decl (t);
12782 tree atype = build_array_type_nelts (orig_rettype,
12783 node->simdclone->simdlen);
12784 if (veclen != node->simdclone->simdlen)
12785 return build1 (VIEW_CONVERT_EXPR, atype, t);
12787 /* Set up a SIMD array to use as the return value. */
12788 tree retval = create_tmp_var_raw (atype, "retval");
12789 gimple_add_tmp_var (retval);
12790 return retval;
12793 /* Each vector argument has a corresponding array to be used locally
12794 as part of the eventual loop. Create such temporary array and
12795 return it.
12797 PREFIX is the prefix to be used for the temporary.
12799 TYPE is the inner element type.
12801 SIMDLEN is the number of elements. */
12803 static tree
12804 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
12806 tree atype = build_array_type_nelts (type, simdlen);
12807 tree avar = create_tmp_var_raw (atype, prefix);
12808 gimple_add_tmp_var (avar);
12809 return avar;
12812 /* Modify the function argument types to their corresponding vector
12813 counterparts if appropriate. Also, create one array for each simd
12814 argument to be used locally when using the function arguments as
12815 part of the loop.
12817 NODE is the function whose arguments are to be adjusted.
12819 Returns an adjustment vector that describes how the argument
12820 types have been adjusted. */
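/* E.g. (a sketch, assuming a 128-bit vecsize_int and simdlen 8), a
   vector argument "int b" becomes two "vector(4) int" parameters, and a
   local simd array of 8 ints (named after the parameter) is created so
   the clone body can keep addressing scalar elements.  */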
12822 static ipa_parm_adjustment_vec
12823 simd_clone_adjust_argument_types (struct cgraph_node *node)
12825 vec<tree> args;
12826 ipa_parm_adjustment_vec adjustments;
12828 if (node->definition)
12829 args = ipa_get_vector_of_formal_parms (node->decl);
12830 else
12831 args = simd_clone_vector_of_formal_parm_types (node->decl);
12832 adjustments.create (args.length ());
12833 unsigned i, j, veclen;
12834 struct ipa_parm_adjustment adj;
12835 for (i = 0; i < node->simdclone->nargs; ++i)
12837 memset (&adj, 0, sizeof (adj));
12838 tree parm = args[i];
12839 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
12840 adj.base_index = i;
12841 adj.base = parm;
12843 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
12844 node->simdclone->args[i].orig_type = parm_type;
12846 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
12848 /* No adjustment necessary for scalar arguments. */
	  adj.op = IPA_PARM_OP_COPY;
	}
      else
	{
	  if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
	    veclen = node->simdclone->vecsize_int;
	  else
	    veclen = node->simdclone->vecsize_float;
	  veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
	  if (veclen > node->simdclone->simdlen)
	    veclen = node->simdclone->simdlen;
	  adj.arg_prefix = "simd";
	  if (POINTER_TYPE_P (parm_type))
	    adj.type = build_vector_type (pointer_sized_int_node, veclen);
	  else
	    adj.type = build_vector_type (parm_type, veclen);
	  node->simdclone->args[i].vector_type = adj.type;
	  for (j = veclen; j < node->simdclone->simdlen; j += veclen)
	    {
	      adjustments.safe_push (adj);
	      if (j == veclen)
		{
		  memset (&adj, 0, sizeof (adj));
		  adj.op = IPA_PARM_OP_NEW;
		  adj.arg_prefix = "simd";
		  adj.base_index = i;
		  adj.type = node->simdclone->args[i].vector_type;
		}
	    }

	  if (node->definition)
	    node->simdclone->args[i].simd_array
	      = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
				       parm_type, node->simdclone->simdlen);
	}
      adjustments.safe_push (adj);
    }

  if (node->simdclone->inbranch)
    {
      tree base_type
	= simd_clone_compute_base_data_type (node->simdclone->origin,
					     node->simdclone);

      memset (&adj, 0, sizeof (adj));
      adj.op = IPA_PARM_OP_NEW;
      adj.arg_prefix = "mask";

      adj.base_index = i;
      if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
	veclen = node->simdclone->vecsize_int;
      else
	veclen = node->simdclone->vecsize_float;
      veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
      if (veclen > node->simdclone->simdlen)
	veclen = node->simdclone->simdlen;
      if (POINTER_TYPE_P (base_type))
	adj.type = build_vector_type (pointer_sized_int_node, veclen);
      else
	adj.type = build_vector_type (base_type, veclen);
      adjustments.safe_push (adj);

      for (j = veclen; j < node->simdclone->simdlen; j += veclen)
	adjustments.safe_push (adj);

      /* We have previously allocated one extra entry for the mask.  Use
	 it and fill it.  */
      struct cgraph_simd_clone *sc = node->simdclone;
      sc->nargs++;
      if (node->definition)
	{
	  sc->args[i].orig_arg
	    = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
	  sc->args[i].simd_array
	    = create_tmp_simd_array ("mask", base_type, sc->simdlen);
	}
      sc->args[i].orig_type = base_type;
      sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
    }

  if (node->definition)
    ipa_modify_formal_parameters (node->decl, adjustments);
  else
    {
      tree new_arg_types = NULL_TREE, new_reversed;
      bool last_parm_void = false;
      if (args.length () > 0 && args.last () == void_type_node)
	last_parm_void = true;

      gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
      j = adjustments.length ();
      for (i = 0; i < j; i++)
	{
	  struct ipa_parm_adjustment *adj = &adjustments[i];
	  tree ptype;
	  if (adj->op == IPA_PARM_OP_COPY)
	    ptype = args[adj->base_index];
	  else
	    ptype = adj->type;
	  new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
	}
      new_reversed = nreverse (new_arg_types);
      if (last_parm_void)
	{
	  if (new_reversed)
	    TREE_CHAIN (new_arg_types) = void_list_node;
	  else
	    new_reversed = void_list_node;
	}

      tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
      TYPE_ARG_TYPES (new_type) = new_reversed;
      TREE_TYPE (node->decl) = new_type;

      adjustments.release ();
    }
  args.release ();
  return adjustments;
}
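/* A worked example of the adjustment logic above (hypothetical numbers,
   not tied to any particular target): for

       #pragma omp declare simd simdlen(8)
       int foo (int x);

   with vecsize_int == 256, veclen is 256 / 32 == 8 == simdlen, so X is
   rewritten into a single V8SI vector parameter.  With vecsize_int == 128,
   veclen would be 4, and the loop above would push one extra
   IPA_PARM_OP_NEW adjustment, yielding two V4SI parameters for the one
   original X.  */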
/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.  */

static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_parm_adjustment_vec adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      if (adjustments[j].op == IPA_PARM_OP_COPY)
	continue;

      node->simdclone->args[i].vector_arg = arg;

      tree array = node->simdclone->args[i].simd_array;
      if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
	{
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      elemsize
		= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}
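/* Sketch of the sequence built above (declaration names invented for
   illustration): for a V4SI vector parameter simd.0 whose simd array is
   D.1234 and simdlen == 4, a single full-width store is emitted:

       MEM[(V4SI *)&D.1234] = simd.0;

   If simdlen were 8 while the vectors hold only 4 elements, the loop
   above would instead store simd.0 at byte offset 0 and its chained
   sibling parameter at offset 16 (4 elements * 4 bytes).  */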
/* Callback info for ipa_simd_modify_stmt_ops below.  */

struct modify_stmt_info {
  ipa_parm_adjustment_vec adjustments;
  gimple stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};
/* Callback for walk_gimple_op.

   Adjust operands from a given statement as specified in the
   adjustments vector in the callback data.  */

static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  struct ipa_parm_adjustment *cand = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
  else
    {
      if (TYPE_P (*tp))
	*walk_subtrees = 0;
    }

  tree repl = NULL_TREE;
  if (cand)
    repl = unshare_expr (cand->new_decl);
  else
    {
      if (tp != orig_tp)
	{
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }

  if (tp != orig_tp)
    {
      repl = build_fold_addr_expr (repl);
      gimple stmt;
      if (is_gimple_debug (info->stmt))
	{
	  tree vexpr = make_node (DEBUG_EXPR_DECL);
	  stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
	  DECL_ARTIFICIAL (vexpr) = 1;
	  TREE_TYPE (vexpr) = TREE_TYPE (repl);
	  DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
	  repl = vexpr;
	}
      else
	{
	  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
	  repl = gimple_assign_lhs (stmt);
	}
      gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;

  info->modified = true;
  return NULL_TREE;
}
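/* For example (illustrative names): a use of the PARM_DECL `x' in

       _1 = x + 1;

   is rewritten to use the simd-array element `x.array[iter]' prepared by
   the caller, while a use under an ADDR_EXPR such as `&x' has the
   replacement's address computed into a fresh SSA name by a statement
   inserted just before, so the rewritten operand itself stays gimple.  */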
/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.

   This function will replace all function argument uses with their
   corresponding simd array elements, and adjust the return values
   accordingly.  */

static void
ipa_simd_modify_function_body (struct cgraph_node *node,
			       ipa_parm_adjustment_vec adjustments,
			       tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j, l;

  /* Re-use the adjustments array, but this time use it to replace
     every function argument use with an offset into the corresponding
     simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg)
	continue;

      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      adjustments[j].new_decl
	= build4 (ARRAY_REF,
		  basetype,
		  node->simdclone->args[i].simd_array,
		  iter,
		  NULL_TREE, NULL_TREE);
      if (adjustments[j].op == IPA_PARM_OP_NONE
	  && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
	j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
    }

  l = adjustments.length ();
  for (i = 1; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (name
	  && SSA_NAME_VAR (name)
	  && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
	{
	  for (j = 0; j < l; j++)
	    if (SSA_NAME_VAR (name) == adjustments[j].base
		&& adjustments[j].new_decl)
	      {
		tree base_var;
		if (adjustments[j].new_ssa_base == NULL_TREE)
		  {
		    base_var
		      = copy_var_decl (adjustments[j].base,
				       DECL_NAME (adjustments[j].base),
				       TREE_TYPE (adjustments[j].base));
		    adjustments[j].new_ssa_base = base_var;
		  }
		else
		  base_var = adjustments[j].new_ssa_base;
		if (SSA_NAME_IS_DEFAULT_DEF (name))
		  {
		    bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
		    gimple_stmt_iterator gsi = gsi_after_labels (bb);
		    tree new_decl = unshare_expr (adjustments[j].new_decl);
		    set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
		    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
		    SSA_NAME_IS_DEFAULT_DEF (name) = 0;
		    gimple stmt = gimple_build_assign (name, new_decl);
		    gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		  }
		else
		  SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	      }
	}
    }

  struct modify_stmt_info info;
  info.adjustments = adjustments;

  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;

      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  info.stmt = stmt;
	  struct walk_stmt_info wi;

	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);

	  if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
	    {
	      tree retval = gimple_return_retval (return_stmt);
	      if (!retval)
		{
		  gsi_remove (&gsi, true);
		  continue;
		}

	      /* Replace `return foo' with `retval_array[iter] = foo'.  */
	      tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
				 retval_array, iter, NULL, NULL);
	      stmt = gimple_build_assign (ref, retval);
	      gsi_replace (&gsi, stmt, true);
	      info.modified = true;
	    }

	  if (info.modified)
	    {
	      update_stmt (stmt);
	      if (maybe_clean_eh_stmt (stmt))
		gimple_purge_dead_eh_edges (gimple_bb (stmt));
	    }
	  gsi_next (&gsi);
	}
    }
}
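/* Net effect (sketch, with invented names): a body statement like

       return x_3;

   becomes

       retval.5[iter1] = x_3;

   where retval.5 is the simd array created for the return value and
   iter1 is the per-lane induction variable passed in as ITER.  */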
/* Adjust the argument types in NODE to their appropriate vector
   counterparts.  */

static void
simd_clone_adjust (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  targetm.simd_clone.adjust (node);

  tree retval = simd_clone_adjust_return_type (node);
  ipa_parm_adjustment_vec adjustments
    = simd_clone_adjust_argument_types (node);

  push_gimplify_context ();

  gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);

  /* Adjust all uses of vector arguments accordingly.  Adjust all
     return values accordingly.  */
  tree iter = create_tmp_var (unsigned_type_node, "iter");
  tree iter1 = make_ssa_name (iter);
  tree iter2 = make_ssa_name (iter);
  ipa_simd_modify_function_body (node, adjustments, retval, iter1);

  /* Initialize the iteration variable.  */
  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  basic_block body_bb = split_block_after_labels (entry_bb)->dest;
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  /* Insert the SIMD array and iv initialization at function
     entry.  */
  gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);

  pop_gimplify_context (NULL);

  /* Create a new BB right before the original exit BB, to hold the
     iteration increment and the condition/branch.  */
  basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
  basic_block incr_bb = create_empty_bb (orig_exit);
  add_bb_to_loop (incr_bb, body_bb->loop_father);
  /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty
     flag.  Set it now to be a FALLTHRU_EDGE.  */
  gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
  EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
  for (unsigned i = 0;
       i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
    {
      edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
      redirect_edge_succ (e, incr_bb);
    }
  edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
  e->probability = REG_BR_PROB_BASE;
  gsi = gsi_last_bb (incr_bb);
  gimple g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
				  build_int_cst (unsigned_type_node, 1));
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Mostly annotate the loop for the vectorizer (the rest is done below).  */
  struct loop *loop = alloc_loop ();
  cfun->has_force_vectorize_loops = true;
  loop->safelen = node->simdclone->simdlen;
  loop->force_vectorize = true;
  loop->header = body_bb;

  /* Branch around the body if the mask applies.  */
  if (node->simdclone->inbranch)
    {
      gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
      tree mask_array
	= node->simdclone->args[node->simdclone->nargs - 1].simd_array;
      tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
      tree aref = build4 (ARRAY_REF,
			  TREE_TYPE (TREE_TYPE (mask_array)),
			  mask_array, iter1,
			  NULL, NULL);
      g = gimple_build_assign (mask, aref);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
      if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
	{
	  aref = build1 (VIEW_CONVERT_EXPR,
			 build_nonstandard_integer_type (bitsize, 0), mask);
	  mask = make_ssa_name (TREE_TYPE (aref));
	  g = gimple_build_assign (mask, aref);
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	}

      g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
			     NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
      FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
    }

  /* Generate the condition.  */
  g = gimple_build_cond (LT_EXPR,
			 iter2,
			 build_int_cst (unsigned_type_node,
					node->simdclone->simdlen),
			 NULL, NULL);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
  e = split_block (incr_bb, gsi_stmt (gsi));
  basic_block latch_bb = e->dest;
  basic_block new_exit_bb;
  new_exit_bb = split_block_after_labels (latch_bb)->dest;
  loop->latch = latch_bb;

  redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);

  make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
  /* The successor of incr_bb is already pointing to latch_bb; just
     change the flags.
     make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE);  */
  FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;

  gphi *phi = create_phi_node (iter1, body_bb);
  edge preheader_edge = find_edge (entry_bb, body_bb);
  edge latch_edge = single_succ_edge (latch_bb);
  add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
	       UNKNOWN_LOCATION);
  add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);

  /* Generate the new return.  */
  gsi = gsi_last_bb (new_exit_bb);
  if (retval
      && TREE_CODE (retval) == VIEW_CONVERT_EXPR
      && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
    retval = TREE_OPERAND (retval, 0);
  else if (retval)
    {
      retval = build1 (VIEW_CONVERT_EXPR,
		       TREE_TYPE (TREE_TYPE (node->decl)),
		       retval);
      retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
					 false, GSI_CONTINUE_LINKING);
    }
  g = gimple_build_return (retval);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Handle aligned clauses by replacing default defs of the aligned
     uniform args with __builtin_assume_aligned (arg_N(D), alignment)
     lhs.  Handle linear by adding PHIs.  */
  for (unsigned i = 0; i < node->simdclone->nargs; i++)
    if (node->simdclone->args[i].alignment
	&& node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	&& (node->simdclone->args[i].alignment
	    & (node->simdclone->args[i].alignment - 1)) == 0
	&& TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
	   == POINTER_TYPE)
      {
	unsigned int alignment = node->simdclone->args[i].alignment;
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	if (def && !has_zero_uses (def))
	  {
	    tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
	    gimple_seq seq = NULL;
	    bool need_cvt = false;
	    gcall *call
	      = gimple_build_call (fn, 2, def, size_int (alignment));
	    g = call;
	    if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
					    ptr_type_node))
	      need_cvt = true;
	    tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
	    gimple_call_set_lhs (g, t);
	    gimple_seq_add_stmt_without_update (&seq, g);
	    if (need_cvt)
	      {
		t = make_ssa_name (orig_arg);
		g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
		gimple_seq_add_stmt_without_update (&seq, g);
	      }
	    gsi_insert_seq_on_edge_immediate
	      (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);

	    entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	    int freq = compute_call_stmt_bb_frequency (current_function_decl,
						       entry_bb);
	    node->create_edge (cgraph_node::get_create (fn),
			       call, entry_bb->count, freq);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    tree repl = gimple_get_lhs (g);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (is_gimple_debug (use_stmt) || use_stmt == call)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, repl);
	  }
      }
    else if (node->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
      {
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
		    || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
	if (def && !has_zero_uses (def))
	  {
	    iter1 = make_ssa_name (orig_arg);
	    iter2 = make_ssa_name (orig_arg);
	    phi = create_phi_node (iter1, body_bb);
	    add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
	    add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
	    enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
				  ? PLUS_EXPR : POINTER_PLUS_EXPR;
	    tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
			   ? TREE_TYPE (orig_arg) : sizetype;
	    tree addcst
	      = build_int_cst (addtype, node->simdclone->args[i].linear_step);
	    g = gimple_build_assign (iter2, code, iter1, addcst);
	    gsi = gsi_last_bb (incr_bb);
	    gsi_insert_before (&gsi, g, GSI_SAME_STMT);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (use_stmt == phi)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, iter1);
	  }
      }
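/* Sketch of the two rewrites performed by the loop above (names are
   illustrative).  For  aligned(p:32)  on a uniform pointer argument,
   the entry block gains

       p.0_2 = __builtin_assume_aligned (p_1(D), 32);

   and all non-debug uses of p_1(D) are redirected to p.0_2.  For
   linear(i:1), the default def i_1(D) is replaced by a PHI in the loop
   header

       i_2 = PHI <i_1(D) (preheader), i_3 (latch)>

   with the increment  i_3 = i_2 + 1  placed in incr_bb, so each lane
   observes its own linear value.  */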
  calculate_dominance_info (CDI_DOMINATORS);
  add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);

  pop_cfun ();
}
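/* Overall, simd_clone_adjust wraps the original body in a loop of this
   shape (GIMPLE-like sketch, not literal output):

       iter1 = 0;
     body:
       ... original body, argument uses rewritten to simd arrays ...
       retval.array[iter1] = <scalar result>;
       iter2 = iter1 + 1;
       if (iter2 < simdlen) goto body;
       return <vector view of retval.array>;

   annotated with force_vectorize and safelen so the vectorizer can
   collapse it back into straight-line vector code.  */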
/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  */

static void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl));
  if (attr == NULL_TREE
      || node->global.inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;

  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, where we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
	= simd_clone_clauses_extract (node, TREE_VALUE (attr),
				      &inbranch_clause_specified);
      if (clone_info == NULL)
	continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
	 1 (just one ISA of simd clones should be created) or a higher
	 count of ISA variants.  In the latter case, clone_info is
	 initialized for the first ISA variant.  */
      int count
	= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
							  base_type, 0);
      if (count == 0)
	continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
	 also create one inbranch and one !inbranch clone of each.  */
      for (int i = 0; i < count * 2; i++)
	{
	  struct cgraph_simd_clone *clone = clone_info;
	  if (inbranch_clause_specified && (i & 1) != 0)
	    continue;

	  if (i != 0)
	    {
	      clone = simd_clone_struct_alloc (clone_info->nargs
					       + ((i & 1) != 0));
	      simd_clone_struct_copy (clone, clone_info);
	      /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
		 and simd_clone_adjust_argument_types did to the first
		 clone's info.  */
	      clone->nargs -= clone_info->inbranch;
	      clone->simdlen = orig_simdlen;
	      /* And call the target hook again to get the right ISA.  */
	      targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
							      base_type,
							      i / 2);
	      if ((i & 1) != 0)
		clone->inbranch = 1;
	    }

	  /* simd_clone_mangle might fail if such a clone has been created
	     already.  */
	  tree id = simd_clone_mangle (node, clone);
	  if (id == NULL_TREE)
	    continue;

	  /* Only when we are sure we want to create the clone do we
	     actually clone the function (for definitions) or create
	     another extern FUNCTION_DECL (for prototypes without
	     definitions).  */
	  struct cgraph_node *n = simd_clone_create (node);
	  if (n == NULL)
	    continue;

	  n->simdclone = clone;
	  clone->origin = node;
	  clone->next_clone = NULL;
	  if (node->simd_clones == NULL)
	    {
	      clone->prev_clone = n;
	      node->simd_clones = n;
	    }
	  else
	    {
	      clone->prev_clone = node->simd_clones->simdclone->prev_clone;
	      clone->prev_clone->simdclone->next_clone = n;
	      node->simd_clones->simdclone->prev_clone = n;
	    }
	  symtab->change_decl_assembler_name (n->decl, id);
	  /* And finally adjust the return type, parameters and for
	     definitions also function body.  */
	  if (node->definition)
	    simd_clone_adjust (n);
	  else
	    {
	      simd_clone_adjust_return_type (n);
	      simd_clone_adjust_argument_types (n);
	    }
	}
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}
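/* For instance (illustrative, x86-style vector ABI mangling):

       #pragma omp declare simd simdlen(4)
       int foo (int x);

   with no inbranch/notinbranch clause requests both a masked and an
   unmasked clone per ISA variant, e.g. _ZGVbM4v_foo and _ZGVbN4v_foo
   for the first variant.  */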
/* Entry point for IPA simd clone creation pass.  */

static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}
namespace {

const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS, /* type */
  "simdclone", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone(gcc::context *ctxt)
    : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

bool
pass_omp_simd_clone::gate (function *)
{
  return ((flag_openmp || flag_openmp_simd
	   || flag_cilkplus
	   || (in_lto_p && !flag_wpa))
	  && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
}

} // anon namespace

simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}
/* Helper function for omp_finish_file routine.  Takes decls from V_DECLS and
   adds their addresses and sizes to constructor-vector V_CTOR.  */

static void
add_decls_addresses_to_decl_constructor (vec<tree, va_gc> *v_decls,
					 vec<constructor_elt, va_gc> *v_ctor)
{
  unsigned len = vec_safe_length (v_decls);
  for (unsigned i = 0; i < len; i++)
    {
      tree it = (*v_decls)[i];
      bool is_function = TREE_CODE (it) != VAR_DECL;

      CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE, build_fold_addr_expr (it));
      if (!is_function)
	CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE,
				fold_convert (const_ptr_type_node,
					      DECL_SIZE_UNIT (it)));
    }
}
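/* E.g. (sketch): for a variable  int g[32]  the constructor receives the
   pair { &g, 128 } (assuming 4-byte int), whereas a function contributes
   only its address.  */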
/* Create new symbols containing (address, size) pairs for global variables
   marked with the "omp declare target" attribute, as well as addresses for
   functions that are outlined offloading regions.  */

void
omp_finish_file (void)
{
  unsigned num_funcs = vec_safe_length (offload_funcs);
  unsigned num_vars = vec_safe_length (offload_vars);

  if (num_funcs == 0 && num_vars == 0)
    return;

  if (targetm_common.have_named_sections)
    {
      vec<constructor_elt, va_gc> *v_f, *v_v;
      vec_alloc (v_f, num_funcs);
      vec_alloc (v_v, num_vars * 2);

      add_decls_addresses_to_decl_constructor (offload_funcs, v_f);
      add_decls_addresses_to_decl_constructor (offload_vars, v_v);

      tree vars_decl_type = build_array_type_nelts (pointer_sized_int_node,
						    num_vars * 2);
      tree funcs_decl_type = build_array_type_nelts (pointer_sized_int_node,
						     num_funcs);
      TYPE_ALIGN (vars_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
      TYPE_ALIGN (funcs_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
      tree ctor_v = build_constructor (vars_decl_type, v_v);
      tree ctor_f = build_constructor (funcs_decl_type, v_f);
      TREE_CONSTANT (ctor_v) = TREE_CONSTANT (ctor_f) = 1;
      TREE_STATIC (ctor_v) = TREE_STATIC (ctor_f) = 1;
      tree funcs_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
				    get_identifier (".offload_func_table"),
				    funcs_decl_type);
      tree vars_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
				   get_identifier (".offload_var_table"),
				   vars_decl_type);
      TREE_STATIC (funcs_decl) = TREE_STATIC (vars_decl) = 1;
      /* Do not align tables more than TYPE_ALIGN (pointer_sized_int_node),
	 otherwise a joint table in a binary will contain padding between
	 tables from multiple object files.  */
      DECL_USER_ALIGN (funcs_decl) = DECL_USER_ALIGN (vars_decl) = 1;
      DECL_ALIGN (funcs_decl) = TYPE_ALIGN (funcs_decl_type);
      DECL_ALIGN (vars_decl) = TYPE_ALIGN (vars_decl_type);
      DECL_INITIAL (funcs_decl) = ctor_f;
      DECL_INITIAL (vars_decl) = ctor_v;
      set_decl_section_name (funcs_decl, OFFLOAD_FUNC_TABLE_SECTION_NAME);
      set_decl_section_name (vars_decl, OFFLOAD_VAR_TABLE_SECTION_NAME);

      varpool_node::finalize_decl (vars_decl);
      varpool_node::finalize_decl (funcs_decl);
    }
  else
    {
      for (unsigned i = 0; i < num_funcs; i++)
	{
	  tree it = (*offload_funcs)[i];
	  targetm.record_offload_symbol (it);
	}
      for (unsigned i = 0; i < num_vars; i++)
	{
	  tree it = (*offload_vars)[i];
	  targetm.record_offload_symbol (it);
	}
    }
}
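/* The named-sections path above is roughly equivalent to emitting
   (illustrative C, names invented):

       static uintptr_t __offload_func_table[] __attribute__((section (...)))
	 = { (uintptr_t) &fn1, (uintptr_t) &fn2, ... };
       static uintptr_t __offload_var_table[] __attribute__((section (...)))
	 = { (uintptr_t) &var1, sizeof (var1), ... };

   so the linker can concatenate the per-object tables into one joint
   table consumed by the offloading runtime.  */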
#include "gt-omp-low.h"