/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "stringpool.h"
30 #include "stor-layout.h"
31 #include "rtl.h"
32 #include "predict.h"
33 #include "vec.h"
34 #include "hashtab.h"
35 #include "hash-set.h"
36 #include "machmode.h"
37 #include "hard-reg-set.h"
38 #include "input.h"
39 #include "function.h"
40 #include "dominance.h"
41 #include "cfg.h"
42 #include "cfganal.h"
43 #include "basic-block.h"
44 #include "tree-ssa-alias.h"
45 #include "internal-fn.h"
46 #include "gimple-fold.h"
47 #include "gimple-expr.h"
48 #include "is-a.h"
49 #include "gimple.h"
50 #include "gimplify.h"
51 #include "gimple-iterator.h"
52 #include "gimplify-me.h"
53 #include "gimple-walk.h"
54 #include "tree-iterator.h"
55 #include "tree-inline.h"
56 #include "langhooks.h"
57 #include "diagnostic-core.h"
58 #include "gimple-ssa.h"
59 #include "hash-map.h"
60 #include "plugin-api.h"
61 #include "ipa-ref.h"
62 #include "cgraph.h"
63 #include "tree-cfg.h"
64 #include "tree-phinodes.h"
65 #include "ssa-iterators.h"
66 #include "tree-ssanames.h"
67 #include "tree-into-ssa.h"
68 #include "expr.h"
69 #include "tree-dfa.h"
70 #include "tree-ssa.h"
71 #include "flags.h"
72 #include "expr.h"
73 #include "tree-pass.h"
74 #include "except.h"
75 #include "splay-tree.h"
76 #include "insn-codes.h"
77 #include "optabs.h"
78 #include "cfgloop.h"
79 #include "target.h"
80 #include "common/common-target.h"
81 #include "omp-low.h"
82 #include "gimple-low.h"
83 #include "tree-cfgcleanup.h"
84 #include "pretty-print.h"
85 #include "alloc-pool.h"
86 #include "ipa-prop.h"
87 #include "tree-nested.h"
88 #include "tree-eh.h"
89 #include "cilk.h"
90 #include "context.h"
91 #include "lto-section-names.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
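
/* As an informal sketch (simplified; the real expansion depends on the
   clauses and the runtime entry point chosen), a construct such as

       #pragma omp parallel shared (x)
         body;

   is outlined into a child function and replaced by a call into
   libgomp roughly of the form

       .omp_data_o.x = x;
       GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   where foo._omp_fn.0 receives &.omp_data_o and accesses X through
   it.  */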

/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
     barriers should jump to during omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gomp_for *for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};

static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
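
/* Informal usage sketch (modeled on the walk callbacks later in this
   file): the macro is expanded inside a switch on the gimple code of
   the walked statement, so that container statements are descended
   into rather than treated as handled:

       switch (gimple_code (stmt))
         {
         WALK_SUBSTMTS;
         default:
           *handled_ops_p = false;
           break;
         }
*/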

/* Holds offload tables with decls.  */
vec<tree, va_gc> *offload_funcs, *offload_vars;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
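
/* For example (mirroring a use later in this file):

       tree c = find_omp_clause (gimple_omp_for_clauses (ws_stmt),
                                 OMP_CLAUSE_SCHEDULE);

   yields the schedule clause, or NULL_TREE if the loop has none.  */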

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp target region.  */

static inline bool
is_targetreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
         && gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_REGION;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
                    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        gcc_assert (!distribute);
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_DIST_SCHEDULE:
        gcc_assert (distribute);
        fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
        break;
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case NE_EXPR:
          gcc_assert (gimple_omp_for_kind (for_stmt)
                      == GF_OMP_FOR_KIND_CILKSIMD
                      || (gimple_omp_for_kind (for_stmt)
                          == GF_OMP_FOR_KIND_CILKFOR));
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
          else
            loop->n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->n2),
                                        loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
          else
            loop->n2 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->n2),
                                        loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case POINTER_PLUS_EXPR:
          loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                                        NEGATE_EXPR, TREE_TYPE (loop->step),
                                        loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (simd
          || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
              && !fd->have_ordered))
        {
          if (fd->collapse == 1)
            iter_type = TREE_TYPE (loop->v);
          else if (i == 0
                   || TYPE_PRECISION (iter_type)
                      < TYPE_PRECISION (TREE_TYPE (loop->v)))
            iter_type
              = build_nonstandard_integer_type
                  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
        }
      else if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                     PLUS_EXPR, TREE_TYPE (loop->v),
                                     loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

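      /* Descriptive note summarizing the folds below: when N1, N2 and
         STEP are all constant, the trip count of this loop is
         (N2 - N1 + STEP + (COND_CODE == LT_EXPR ? -1 : 1)) / STEP,
         computed in the signed variant of the iterator type; trip
         counts of collapsed loops multiply into one overall count.  */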
      if (collapse_count && *collapse_count == NULL)
        {
          t = fold_binary (loop->cond_code, boolean_type_node,
                           fold_convert (TREE_TYPE (loop->v), loop->n1),
                           fold_convert (TREE_TYPE (loop->v), loop->n2));
          if (t && integer_zerop (t))
            count = build_zero_cst (long_long_unsigned_type_node);
          else if ((i == 0 || count != NULL_TREE)
                   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                   && TREE_CONSTANT (loop->n1)
                   && TREE_CONSTANT (loop->n2)
                   && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype = signed_type_for (itype);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                                   PLUS_EXPR, itype,
                                   fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                     fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                      fold_convert_loc (loc, itype,
                                                                        loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                     fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                         MULT_EXPR, long_long_unsigned_type_node,
                                         count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else if (count && !integer_zerop (count))
            count = NULL_TREE;
        }
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
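
/* For instance (a sketch, not actual compiler output), for

       #pragma omp for collapse (2)
       for (i = 0; i < 4; i++)
         for (j = 0; j < 5; j++)

   the two omp_for_data_loop entries describe the original loops, while
   the synthesized fd->loop runs a fresh .iter variable from 0 to
   .count == 4 * 5 == 20 with step 1 and LT_EXPR as its condition.  */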

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

        #pragma omp parallel for schedule (guided, i * 4)
        for (j ...)

   is lowered into:

        # BLOCK 2 (PAR_ENTRY_BB)
        .omp_data_o.i = i;
        #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

        # BLOCK 3 (WS_ENTRY_BB)
        .omp_data_i = &.omp_data_o;
        D.1667 = .omp_data_i->i;
        D.1598 = D.1667 * 4;
        #pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */
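
/* Informal note (paraphrasing libgomp's interface, not guaranteed
   verbatim): for a loop these are the N1, N2, STEP and optional
   CHUNK_SIZE values that the combined GOMP_parallel_loop_* entry
   points take in addition to the outlined function and its data
   argument; for sections it is the section count expected by
   GOMP_parallel_sections.  */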

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
        {
          tree innerc
            = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
                               OMP_CLAUSE__LOOPTEMP_);
          gcc_assert (innerc);
          n1 = OMP_CLAUSE_DECL (innerc);
          innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
                                    OMP_CLAUSE__LOOPTEMP_);
          gcc_assert (innerc);
          n2 = OMP_CLAUSE_DECL (innerc);
        }

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          ws_args->quick_push (t);
        }

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}

/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we assert
   that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
         for these.  */
      if (TREE_READONLY (decl)
          || ((TREE_CODE (decl) == RESULT_DECL
               || TREE_CODE (decl) == PARM_DECL)
              && DECL_BY_REFERENCE (decl)))
        return false;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                goto maybe_mark_addressable_and_ret;
            }
        }

      /* For tasks avoid using copy-in/out.  As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
        {
          tree outer;
        maybe_mark_addressable_and_ret:
          outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
           && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
         even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
        x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
        x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
        x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
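
/* MASK is a bit mask whose meaning follows from the uses below: bit 0
   installs the field into CTX->RECORD_TYPE / CTX->FIELD_MAP, bit 1
   installs it into CTX->SRECORD_TYPE / CTX->SFIELD_MAP, and bit 2
   requests that an array variable be mapped through a pointer to a
   pointer.  BY_REF selects a pointer-typed field.  (Descriptive note
   inferred from the code, not an original comment.)  */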

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}
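
/* Informally, a parallel region wrapping a single for region might
   dump as:

       bb 2: gimple_omp_parallel
           bb 3: gimple_omp_for
           bb 5: GIMPLE_OMP_CONTINUE
           bb 6: GIMPLE_OMP_RETURN
       bb 7: GIMPLE_OMP_RETURN

   (a sketch of the format strings above, not captured output).  */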

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at BB inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gomp_task *task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gbind *bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node::add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          DECL_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl)
    = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */
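
/* This runs in two passes over CLAUSES: the first pass installs fields
   and local replacement decls, the second fixes up remapped decls once
   all fields are known; reduction, lastprivate and linear GIMPLE
   sequences are scanned afterwards.  (Summary of the loops below.)  */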

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          /* Ignore shared directives in teams construct.  */
          if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
            {
              /* Global variables don't need to be copied,
                 the receiver side will use them directly.  */
              tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
              if (is_global_var (odecl))
                break;
              insert_decl_map (&ctx->cb, decl, odecl);
              break;
            }
          gcc_assert (is_taskreg_ctx (ctx));
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
        case OMP_CLAUSE_LINEAR:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE__LOOPTEMP_:
          gcc_assert (is_parallel_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          install_var_field (decl, false, 3, ctx);
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_NUM_TEAMS:
        case OMP_CLAUSE_THREAD_LIMIT:
        case OMP_CLAUSE_DEVICE:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_DIST_SCHEDULE:
        case OMP_CLAUSE_DEPEND:
        case OMP_CLAUSE__CILK_FOR_COUNT_:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_TO:
        case OMP_CLAUSE_FROM:
        case OMP_CLAUSE_MAP:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
          decl = OMP_CLAUSE_DECL (c);
          /* Global variables with "omp declare target" attribute
             don't need to be copied, the receiver side will use them
             directly.  */
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
              && DECL_P (decl)
              && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
              && varpool_node::get_create (decl)->offloadable)
            break;
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
              && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
            {
              /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
                 #pragma omp target data, there is nothing to map for
                 those.  */
              if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
                  && !POINTER_TYPE_P (TREE_TYPE (decl)))
                break;
            }
          if (DECL_P (decl))
            {
              if (DECL_SIZE (decl)
                  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
                {
                  tree decl2 = DECL_VALUE_EXPR (decl);
                  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
                  decl2 = TREE_OPERAND (decl2, 0);
                  gcc_assert (DECL_P (decl2));
                  install_var_field (decl2, true, 3, ctx);
                  install_var_local (decl2, ctx);
                  install_var_local (decl, ctx);
                }
              else
                {
                  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
                      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
                      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
                      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
                    install_var_field (decl, true, 7, ctx);
                  else
                    install_var_field (decl, true, 3, ctx);
                  if (gimple_omp_target_kind (ctx->stmt)
                      == GF_OMP_TARGET_KIND_REGION)
                    install_var_local (decl, ctx);
                }
            }
          else
            {
              tree base = get_base_address (decl);
              tree nc = OMP_CLAUSE_CHAIN (c);
              if (DECL_P (base)
                  && nc != NULL_TREE
                  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
                  && OMP_CLAUSE_DECL (nc) == base
                  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
                  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
                {
                  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
                  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
                }
              else
                {
                  if (ctx->outer)
                    {
                      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
                      decl = OMP_CLAUSE_DECL (c);
                    }
                  gcc_assert (!splay_tree_lookup (ctx->field_map,
                                                  (splay_tree_key) decl));
                  tree field
                    = build_decl (OMP_CLAUSE_LOCATION (c),
                                  FIELD_DECL, NULL_TREE, ptr_type_node);
                  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
                  insert_field_into_struct (ctx->record_type, field);
                  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
                                     (splay_tree_value) field);
                }
            }
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_MERGEABLE:
        case OMP_CLAUSE_PROC_BIND:
        case OMP_CLAUSE_SAFELEN:
          break;

        case OMP_CLAUSE_ALIGNED:
          decl = OMP_CLAUSE_DECL (c);
          if (is_global_var (decl)
              && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
            install_var_local (decl, ctx);
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
        case OMP_CLAUSE_LINEAR:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
                   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          /* Ignore shared directives in teams construct.  */
          if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
            break;
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_MAP:
          if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
            break;
          decl = OMP_CLAUSE_DECL (c);
          if (DECL_P (decl)
              && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
              && varpool_node::get_create (decl)->offloadable)
            break;
          if (DECL_P (decl))
            {
              if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
                  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
                  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
                {
                  tree new_decl = lookup_decl (decl, ctx);
                  TREE_TYPE (new_decl)
                    = remap_type (TREE_TYPE (decl), &ctx->cb);
                }
              else if (DECL_SIZE (decl)
                       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
                {
                  tree decl2 = DECL_VALUE_EXPR (decl);
                  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
                  decl2 = TREE_OPERAND (decl2, 0);
                  gcc_assert (DECL_P (decl2));
                  fixup_remapped_decl (decl2, ctx, false);
                  fixup_remapped_decl (decl, ctx, true);
                }
              else
                fixup_remapped_decl (decl, ctx, false);
            }
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_NUM_TEAMS:
        case OMP_CLAUSE_THREAD_LIMIT:
        case OMP_CLAUSE_DEVICE:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_DIST_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_MERGEABLE:
        case OMP_CLAUSE_PROC_BIND:
        case OMP_CLAUSE_SAFELEN:
        case OMP_CLAUSE_ALIGNED:
        case OMP_CLAUSE_DEPEND:
        case OMP_CLAUSE__LOOPTEMP_:
        case OMP_CLAUSE_TO:
        case OMP_CLAUSE_FROM:
        case OMP_CLAUSE__CILK_FOR_COUNT_:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
               && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
        scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  If
   IS_CILK_FOR is true then the suffix for the child function is
   "_cilk_for_fn".  */

static tree
create_omp_child_function_name (bool task_copy, bool is_cilk_for)
{
  if (is_cilk_for)
    return clone_function_name (current_function_decl, "_cilk_for_fn");
  return clone_function_name (current_function_decl,
                              task_copy ? "_omp_cpyfn" : "_omp_fn");
}

/* Returns the type of the induction variable for the child function for
   _Cilk_for and the types for _high and _low variables based on TYPE.  */

static tree
cilk_for_check_loop_diff_type (tree type)
{
  if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
    {
      if (TYPE_UNSIGNED (type))
        return uint32_type_node;
      else
        return integer_type_node;
    }
  else
    {
      if (TYPE_UNSIGNED (type))
        return uint64_type_node;
      else
        return long_long_integer_type_node;
    }
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  tree cilk_for_count
    = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
      ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                         OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
  tree cilk_var_type = NULL_TREE;

  name = create_omp_child_function_name (task_copy,
                                         cilk_for_count != NULL_TREE);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
                                     ptr_type_node, NULL_TREE);
  else if (cilk_for_count)
    {
      type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
      cilk_var_type = cilk_for_check_loop_diff_type (type);
      type = build_function_type_list (void_type_node, ptr_type_node,
                                       cilk_var_type, cilk_var_type, NULL_TREE);
    }
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  if (cgraph_node::get (current_function_decl)->offloadable)
    cgraph_node::get_create (decl)->offloadable = 1;
  else
    {
      omp_context *octx;
      for (octx = ctx; octx; octx = octx->outer)
        if (is_targetreg_ctx (octx))
          {
            cgraph_node::get_create (decl)->offloadable = 1;
#ifdef ENABLE_OFFLOADING
            g->have_offload = true;
#endif
            break;
          }
    }

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

1979 /* _Cilk_for's child function requires two extra parameters called
1980 __low and __high that are set by the Cilk runtime when it calls this
1981 function. */
1982 if (cilk_for_count)
1984 t = build_decl (DECL_SOURCE_LOCATION (decl),
1985 PARM_DECL, get_identifier ("__high"), cilk_var_type);
1986 DECL_ARTIFICIAL (t) = 1;
1987 DECL_NAMELESS (t) = 1;
1988 DECL_ARG_TYPE (t) = ptr_type_node;
1989 DECL_CONTEXT (t) = current_function_decl;
1990 TREE_USED (t) = 1;
1991 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1992 DECL_ARGUMENTS (decl) = t;
1994 t = build_decl (DECL_SOURCE_LOCATION (decl),
1995 PARM_DECL, get_identifier ("__low"), cilk_var_type);
1996 DECL_ARTIFICIAL (t) = 1;
1997 DECL_NAMELESS (t) = 1;
1998 DECL_ARG_TYPE (t) = ptr_type_node;
1999 DECL_CONTEXT (t) = current_function_decl;
2000 TREE_USED (t) = 1;
2001 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2002 DECL_ARGUMENTS (decl) = t;
2005 tree data_name = get_identifier (".omp_data_i");
2006 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
2007 ptr_type_node);
2008 DECL_ARTIFICIAL (t) = 1;
2009 DECL_NAMELESS (t) = 1;
2010 DECL_ARG_TYPE (t) = ptr_type_node;
2011 DECL_CONTEXT (t) = current_function_decl;
2012 TREE_USED (t) = 1;
2013 if (cilk_for_count)
2014 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2015 DECL_ARGUMENTS (decl) = t;
2016 if (!task_copy)
2017 ctx->receiver_decl = t;
2018 else
2020 t = build_decl (DECL_SOURCE_LOCATION (decl),
2021 PARM_DECL, get_identifier (".omp_data_o"),
2022 ptr_type_node);
2023 DECL_ARTIFICIAL (t) = 1;
2024 DECL_NAMELESS (t) = 1;
2025 DECL_ARG_TYPE (t) = ptr_type_node;
2026 DECL_CONTEXT (t) = current_function_decl;
2027 TREE_USED (t) = 1;
2028 TREE_ADDRESSABLE (t) = 1;
2029 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2030 DECL_ARGUMENTS (decl) = t;
2033 /* Allocate memory for the function structure. The call to
2034 allocate_struct_function clobbers CFUN, so we need to restore
2035 it afterward. */
2036 push_struct_function (decl);
2037 cfun->function_end_locus = gimple_location (ctx->stmt);
2038 pop_cfun ();
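/* As a sketch of the result (shape assumed, not emitted verbatim): for a
   parallel region inside foo, the decl built above is roughly

     static void foo._omp_fn.0 (void *.omp_data_i);

   where .omp_data_i only gets its real pointer-to-record type later,
   via fixup_child_record_type.  */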
2041 /* Callback for walk_gimple_seq. Check whether a combined parallel
2042 contains an OMP_FOR for which gimple_omp_for_combined_into_p is true. */
2044 static tree
2045 find_combined_for (gimple_stmt_iterator *gsi_p,
2046 bool *handled_ops_p,
2047 struct walk_stmt_info *wi)
2049 gimple stmt = gsi_stmt (*gsi_p);
2051 *handled_ops_p = true;
2052 switch (gimple_code (stmt))
2054 WALK_SUBSTMTS;
2056 case GIMPLE_OMP_FOR:
2057 if (gimple_omp_for_combined_into_p (stmt)
2058 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2060 wi->info = stmt;
2061 return integer_zero_node;
2063 break;
2064 default:
2065 break;
2067 return NULL;
2070 /* Scan an OpenMP parallel directive. */
2072 static void
2073 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2075 omp_context *ctx;
2076 tree name;
2077 gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
2079 /* Ignore parallel directives with empty bodies, unless there
2080 are copyin clauses. */
2081 if (optimize > 0
2082 && empty_body_p (gimple_omp_body (stmt))
2083 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2084 OMP_CLAUSE_COPYIN) == NULL)
2086 gsi_replace (gsi, gimple_build_nop (), false);
2087 return;
2090 if (gimple_omp_parallel_combined_p (stmt))
2092 struct walk_stmt_info wi;
2094 memset (&wi, 0, sizeof (wi));
2095 wi.val_only = true;
2096 walk_gimple_seq (gimple_omp_body (stmt),
2097 find_combined_for, NULL, &wi);
2098 if (wi.info)
2100 gomp_for *for_stmt = as_a <gomp_for *> ((gimple) wi.info);
2101 struct omp_for_data fd;
2102 extract_omp_for_data (for_stmt, &fd, NULL);
2103 /* We need two temporaries with fd.loop.v type (istart/iend)
2104 and then (fd.collapse - 1) temporaries with the same
2105 type for count2 ... countN-1 vars if not constant. */
2106 size_t count = 2, i;
2107 tree type = fd.iter_type;
2108 if (fd.collapse > 1
2109 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2110 count += fd.collapse - 1;
2111 for (i = 0; i < count; i++)
2113 tree temp = create_tmp_var (type);
2114 tree c = build_omp_clause (UNKNOWN_LOCATION,
2115 OMP_CLAUSE__LOOPTEMP_);
2116 insert_decl_map (&outer_ctx->cb, temp, temp);
2117 OMP_CLAUSE_DECL (c) = temp;
2118 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2119 gimple_omp_parallel_set_clauses (stmt, c);
2124 ctx = new_omp_context (stmt, outer_ctx);
2125 taskreg_contexts.safe_push (ctx);
2126 if (taskreg_nesting_level > 1)
2127 ctx->is_nested = true;
2128 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2129 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2130 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2131 name = create_tmp_var_name (".omp_data_s");
2132 name = build_decl (gimple_location (stmt),
2133 TYPE_DECL, name, ctx->record_type);
2134 DECL_ARTIFICIAL (name) = 1;
2135 DECL_NAMELESS (name) = 1;
2136 TYPE_NAME (ctx->record_type) = name;
2137 create_omp_child_function (ctx, false);
2138 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2140 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2141 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2143 if (TYPE_FIELDS (ctx->record_type) == NULL)
2144 ctx->record_type = ctx->receiver_decl = NULL;
2147 /* Scan an OpenMP task directive. */
2149 static void
2150 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2152 omp_context *ctx;
2153 tree name, t;
2154 gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
2156 /* Ignore task directives with empty bodies. */
2157 if (optimize > 0
2158 && empty_body_p (gimple_omp_body (stmt)))
2160 gsi_replace (gsi, gimple_build_nop (), false);
2161 return;
2164 ctx = new_omp_context (stmt, outer_ctx);
2165 taskreg_contexts.safe_push (ctx);
2166 if (taskreg_nesting_level > 1)
2167 ctx->is_nested = true;
2168 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2169 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2170 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2171 name = create_tmp_var_name (".omp_data_s");
2172 name = build_decl (gimple_location (stmt),
2173 TYPE_DECL, name, ctx->record_type);
2174 DECL_ARTIFICIAL (name) = 1;
2175 DECL_NAMELESS (name) = 1;
2176 TYPE_NAME (ctx->record_type) = name;
2177 create_omp_child_function (ctx, false);
2178 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2180 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2182 if (ctx->srecord_type)
2184 name = create_tmp_var_name (".omp_data_a");
2185 name = build_decl (gimple_location (stmt),
2186 TYPE_DECL, name, ctx->srecord_type);
2187 DECL_ARTIFICIAL (name) = 1;
2188 DECL_NAMELESS (name) = 1;
2189 TYPE_NAME (ctx->srecord_type) = name;
2190 create_omp_child_function (ctx, true);
2193 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2195 if (TYPE_FIELDS (ctx->record_type) == NULL)
2197 ctx->record_type = ctx->receiver_decl = NULL;
2198 t = build_int_cst (long_integer_type_node, 0);
2199 gimple_omp_task_set_arg_size (stmt, t);
2200 t = build_int_cst (long_integer_type_node, 1);
2201 gimple_omp_task_set_arg_align (stmt, t);
2206 /* If any decls have been made addressable during scan_omp,
2207 adjust their fields if needed, and lay out the record types
2208 of parallel/task constructs. */
2210 static void
2211 finish_taskreg_scan (omp_context *ctx)
2213 if (ctx->record_type == NULL_TREE)
2214 return;
2216 /* If any task_shared_vars were needed, check for each
2217 OMP_CLAUSE_SHARED clause on the GIMPLE_OMP_{PARALLEL,TASK}
2218 statement whether use_pointer_for_field has changed
2219 because of that. If it did, update the field types now. */
2220 if (task_shared_vars)
2222 tree c;
2224 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2225 c; c = OMP_CLAUSE_CHAIN (c))
2226 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2228 tree decl = OMP_CLAUSE_DECL (c);
2230 /* Global variables don't need to be copied,
2231 the receiver side will use them directly. */
2232 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2233 continue;
2234 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2235 || !use_pointer_for_field (decl, ctx))
2236 continue;
2237 tree field = lookup_field (decl, ctx);
2238 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2239 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2240 continue;
2241 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2242 TREE_THIS_VOLATILE (field) = 0;
2243 DECL_USER_ALIGN (field) = 0;
2244 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2245 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2246 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2247 if (ctx->srecord_type)
2249 tree sfield = lookup_sfield (decl, ctx);
2250 TREE_TYPE (sfield) = TREE_TYPE (field);
2251 TREE_THIS_VOLATILE (sfield) = 0;
2252 DECL_USER_ALIGN (sfield) = 0;
2253 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2254 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2255 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2260 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2262 layout_type (ctx->record_type);
2263 fixup_child_record_type (ctx);
2265 else
2267 location_t loc = gimple_location (ctx->stmt);
2268 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2269 /* Move VLA fields to the end. */
2270 p = &TYPE_FIELDS (ctx->record_type);
2271 while (*p)
2272 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2273 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2275 *q = *p;
2276 *p = TREE_CHAIN (*p);
2277 TREE_CHAIN (*q) = NULL_TREE;
2278 q = &TREE_CHAIN (*q);
2280 else
2281 p = &DECL_CHAIN (*p);
2282 *p = vla_fields;
2283 layout_type (ctx->record_type);
2284 fixup_child_record_type (ctx);
2285 if (ctx->srecord_type)
2286 layout_type (ctx->srecord_type);
2287 tree t = fold_convert_loc (loc, long_integer_type_node,
2288 TYPE_SIZE_UNIT (ctx->record_type));
2289 gimple_omp_task_set_arg_size (ctx->stmt, t);
2290 t = build_int_cst (long_integer_type_node,
2291 TYPE_ALIGN_UNIT (ctx->record_type));
2292 gimple_omp_task_set_arg_align (ctx->stmt, t);
2297 /* Scan an OpenMP loop directive. */
2299 static void
2300 scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
2302 omp_context *ctx;
2303 size_t i;
2305 ctx = new_omp_context (stmt, outer_ctx);
2307 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2309 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2310 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2312 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2313 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2314 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2315 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2317 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2320 /* Scan an OpenMP sections directive. */
2322 static void
2323 scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
2325 omp_context *ctx;
2327 ctx = new_omp_context (stmt, outer_ctx);
2328 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2329 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2332 /* Scan an OpenMP single directive. */
2334 static void
2335 scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
2337 omp_context *ctx;
2338 tree name;
2340 ctx = new_omp_context (stmt, outer_ctx);
2341 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2342 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2343 name = create_tmp_var_name (".omp_copy_s");
2344 name = build_decl (gimple_location (stmt),
2345 TYPE_DECL, name, ctx->record_type);
2346 TYPE_NAME (ctx->record_type) = name;
2348 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2349 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2351 if (TYPE_FIELDS (ctx->record_type) == NULL)
2352 ctx->record_type = NULL;
2353 else
2354 layout_type (ctx->record_type);
2357 /* Scan an OpenMP target{, data, update} directive. */
2359 static void
2360 scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
2362 omp_context *ctx;
2363 tree name;
2364 int kind = gimple_omp_target_kind (stmt);
2366 ctx = new_omp_context (stmt, outer_ctx);
2367 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2368 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2369 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2370 name = create_tmp_var_name (".omp_data_t");
2371 name = build_decl (gimple_location (stmt),
2372 TYPE_DECL, name, ctx->record_type);
2373 DECL_ARTIFICIAL (name) = 1;
2374 DECL_NAMELESS (name) = 1;
2375 TYPE_NAME (ctx->record_type) = name;
2376 if (kind == GF_OMP_TARGET_KIND_REGION)
2378 create_omp_child_function (ctx, false);
2379 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2382 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2383 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2385 if (TYPE_FIELDS (ctx->record_type) == NULL)
2386 ctx->record_type = ctx->receiver_decl = NULL;
2387 else
2389 TYPE_FIELDS (ctx->record_type)
2390 = nreverse (TYPE_FIELDS (ctx->record_type));
2391 #ifdef ENABLE_CHECKING
2392 tree field;
2393 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2394 for (field = TYPE_FIELDS (ctx->record_type);
2395 field;
2396 field = DECL_CHAIN (field))
2397 gcc_assert (DECL_ALIGN (field) == align);
2398 #endif
2399 layout_type (ctx->record_type);
2400 if (kind == GF_OMP_TARGET_KIND_REGION)
2401 fixup_child_record_type (ctx);
2405 /* Scan an OpenMP teams directive. */
2407 static void
2408 scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
2410 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2411 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2412 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2415 /* Check OpenMP nesting restrictions. */
2416 static bool
2417 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2419 if (ctx != NULL)
2421 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2422 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2424 error_at (gimple_location (stmt),
2425 "OpenMP constructs may not be nested inside simd region");
2426 return false;
2428 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2430 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2431 || (gimple_omp_for_kind (stmt)
2432 != GF_OMP_FOR_KIND_DISTRIBUTE))
2433 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2435 error_at (gimple_location (stmt),
2436 "only distribute or parallel constructs are allowed to "
2437 "be closely nested inside teams construct");
2438 return false;
2442 switch (gimple_code (stmt))
2444 case GIMPLE_OMP_FOR:
2445 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2446 return true;
2447 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2449 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2451 error_at (gimple_location (stmt),
2452 "distribute construct must be closely nested inside "
2453 "teams construct");
2454 return false;
2456 return true;
2458 /* FALLTHRU */
2459 case GIMPLE_CALL:
2460 if (is_gimple_call (stmt)
2461 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2462 == BUILT_IN_GOMP_CANCEL
2463 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2464 == BUILT_IN_GOMP_CANCELLATION_POINT))
2466 const char *bad = NULL;
2467 const char *kind = NULL;
2468 if (ctx == NULL)
2470 error_at (gimple_location (stmt), "orphaned %qs construct",
2471 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2472 == BUILT_IN_GOMP_CANCEL
2473 ? "#pragma omp cancel"
2474 : "#pragma omp cancellation point");
2475 return false;
2477 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2478 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2479 : 0)
2481 case 1:
2482 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2483 bad = "#pragma omp parallel";
2484 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2485 == BUILT_IN_GOMP_CANCEL
2486 && !integer_zerop (gimple_call_arg (stmt, 1)))
2487 ctx->cancellable = true;
2488 kind = "parallel";
2489 break;
2490 case 2:
2491 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2492 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2493 bad = "#pragma omp for";
2494 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2495 == BUILT_IN_GOMP_CANCEL
2496 && !integer_zerop (gimple_call_arg (stmt, 1)))
2498 ctx->cancellable = true;
2499 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2500 OMP_CLAUSE_NOWAIT))
2501 warning_at (gimple_location (stmt), 0,
2502 "%<#pragma omp cancel for%> inside "
2503 "%<nowait%> for construct");
2504 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2505 OMP_CLAUSE_ORDERED))
2506 warning_at (gimple_location (stmt), 0,
2507 "%<#pragma omp cancel for%> inside "
2508 "%<ordered%> for construct");
2510 kind = "for";
2511 break;
2512 case 4:
2513 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2514 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2515 bad = "#pragma omp sections";
2516 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2517 == BUILT_IN_GOMP_CANCEL
2518 && !integer_zerop (gimple_call_arg (stmt, 1)))
2520 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2522 ctx->cancellable = true;
2523 if (find_omp_clause (gimple_omp_sections_clauses
2524 (ctx->stmt),
2525 OMP_CLAUSE_NOWAIT))
2526 warning_at (gimple_location (stmt), 0,
2527 "%<#pragma omp cancel sections%> inside "
2528 "%<nowait%> sections construct");
2530 else
2532 gcc_assert (ctx->outer
2533 && gimple_code (ctx->outer->stmt)
2534 == GIMPLE_OMP_SECTIONS);
2535 ctx->outer->cancellable = true;
2536 if (find_omp_clause (gimple_omp_sections_clauses
2537 (ctx->outer->stmt),
2538 OMP_CLAUSE_NOWAIT))
2539 warning_at (gimple_location (stmt), 0,
2540 "%<#pragma omp cancel sections%> inside "
2541 "%<nowait%> sections construct");
2544 kind = "sections";
2545 break;
2546 case 8:
2547 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2548 bad = "#pragma omp task";
2549 else
2550 ctx->cancellable = true;
2551 kind = "taskgroup";
2552 break;
2553 default:
2554 error_at (gimple_location (stmt), "invalid arguments");
2555 return false;
2557 if (bad)
2559 error_at (gimple_location (stmt),
2560 "%<%s %s%> construct not closely nested inside of %qs",
2561 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2562 == BUILT_IN_GOMP_CANCEL
2563 ? "#pragma omp cancel"
2564 : "#pragma omp cancellation point", kind, bad);
2565 return false;
2568 /* FALLTHRU */
2569 case GIMPLE_OMP_SECTIONS:
2570 case GIMPLE_OMP_SINGLE:
2571 for (; ctx != NULL; ctx = ctx->outer)
2572 switch (gimple_code (ctx->stmt))
2574 case GIMPLE_OMP_FOR:
2575 case GIMPLE_OMP_SECTIONS:
2576 case GIMPLE_OMP_SINGLE:
2577 case GIMPLE_OMP_ORDERED:
2578 case GIMPLE_OMP_MASTER:
2579 case GIMPLE_OMP_TASK:
2580 case GIMPLE_OMP_CRITICAL:
2581 if (is_gimple_call (stmt))
2583 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2584 != BUILT_IN_GOMP_BARRIER)
2585 return true;
2586 error_at (gimple_location (stmt),
2587 "barrier region may not be closely nested inside "
2588 "of work-sharing, critical, ordered, master or "
2589 "explicit task region");
2590 return false;
2592 error_at (gimple_location (stmt),
2593 "work-sharing region may not be closely nested inside "
2594 "of work-sharing, critical, ordered, master or explicit "
2595 "task region");
2596 return false;
2597 case GIMPLE_OMP_PARALLEL:
2598 return true;
2599 default:
2600 break;
2602 break;
2603 case GIMPLE_OMP_MASTER:
2604 for (; ctx != NULL; ctx = ctx->outer)
2605 switch (gimple_code (ctx->stmt))
2607 case GIMPLE_OMP_FOR:
2608 case GIMPLE_OMP_SECTIONS:
2609 case GIMPLE_OMP_SINGLE:
2610 case GIMPLE_OMP_TASK:
2611 error_at (gimple_location (stmt),
2612 "master region may not be closely nested inside "
2613 "of work-sharing or explicit task region");
2614 return false;
2615 case GIMPLE_OMP_PARALLEL:
2616 return true;
2617 default:
2618 break;
2620 break;
2621 case GIMPLE_OMP_ORDERED:
2622 for (; ctx != NULL; ctx = ctx->outer)
2623 switch (gimple_code (ctx->stmt))
2625 case GIMPLE_OMP_CRITICAL:
2626 case GIMPLE_OMP_TASK:
2627 error_at (gimple_location (stmt),
2628 "ordered region may not be closely nested inside "
2629 "of critical or explicit task region");
2630 return false;
2631 case GIMPLE_OMP_FOR:
2632 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2633 OMP_CLAUSE_ORDERED) == NULL)
2635 error_at (gimple_location (stmt),
2636 "ordered region must be closely nested inside "
2637 "a loop region with an ordered clause");
2638 return false;
2640 return true;
2641 case GIMPLE_OMP_PARALLEL:
2642 error_at (gimple_location (stmt),
2643 "ordered region must be closely nested inside "
2644 "a loop region with an ordered clause");
2645 return false;
2646 default:
2647 break;
2649 break;
2650 case GIMPLE_OMP_CRITICAL:
2652 tree this_stmt_name
2653 = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
2654 for (; ctx != NULL; ctx = ctx->outer)
2655 if (gomp_critical *other_crit
2656 = dyn_cast <gomp_critical *> (ctx->stmt))
2657 if (this_stmt_name == gimple_omp_critical_name (other_crit))
2659 error_at (gimple_location (stmt),
2660 "critical region may not be nested inside a critical "
2661 "region with the same name");
2662 return false;
2665 break;
2666 case GIMPLE_OMP_TEAMS:
2667 if (ctx == NULL
2668 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2669 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2671 error_at (gimple_location (stmt),
2672 "teams construct not closely nested inside of target "
2673 "region");
2674 return false;
2676 break;
2677 case GIMPLE_OMP_TARGET:
2678 for (; ctx != NULL; ctx = ctx->outer)
2679 if (is_targetreg_ctx (ctx))
2681 const char *name;
2682 switch (gimple_omp_target_kind (stmt))
2684 case GF_OMP_TARGET_KIND_REGION: name = "target"; break;
2685 case GF_OMP_TARGET_KIND_DATA: name = "target data"; break;
2686 case GF_OMP_TARGET_KIND_UPDATE: name = "target update"; break;
2687 default: gcc_unreachable ();
2689 warning_at (gimple_location (stmt), 0,
2690 "%s construct inside of target region", name);
2692 break;
2693 default:
2694 break;
2696 return true;
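/* For example (illustrative), this rejects

     #pragma omp for
     for (i = 0; i < n; i++)
       #pragma omp single
	 f (i);

   with the "work-sharing region may not be closely nested" error above,
   since the single construct is closely nested in the loop region.  */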
2700 /* Helper function for scan_omp.
2702 Callback for walk_tree or operators in walk_gimple_stmt used to
2703 scan for OpenMP directives in TP. */
2705 static tree
2706 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2708 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2709 omp_context *ctx = (omp_context *) wi->info;
2710 tree t = *tp;
2712 switch (TREE_CODE (t))
2714 case VAR_DECL:
2715 case PARM_DECL:
2716 case LABEL_DECL:
2717 case RESULT_DECL:
2718 if (ctx)
2719 *tp = remap_decl (t, &ctx->cb);
2720 break;
2722 default:
2723 if (ctx && TYPE_P (t))
2724 *tp = remap_type (t, &ctx->cb);
2725 else if (!DECL_P (t))
2727 *walk_subtrees = 1;
2728 if (ctx)
2730 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2731 if (tem != TREE_TYPE (t))
2733 if (TREE_CODE (t) == INTEGER_CST)
2734 *tp = wide_int_to_tree (tem, t);
2735 else
2736 TREE_TYPE (t) = tem;
2740 break;
2743 return NULL_TREE;
2746 /* Return true if FNDECL is a setjmp or a longjmp. */
2748 static bool
2749 setjmp_or_longjmp_p (const_tree fndecl)
2751 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2752 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2753 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2754 return true;
2756 tree declname = DECL_NAME (fndecl);
2757 if (!declname)
2758 return false;
2759 const char *name = IDENTIFIER_POINTER (declname);
2760 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2764 /* Helper function for scan_omp.
2766 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2767 the current statement in GSI. */
2769 static tree
2770 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2771 struct walk_stmt_info *wi)
2773 gimple stmt = gsi_stmt (*gsi);
2774 omp_context *ctx = (omp_context *) wi->info;
2776 if (gimple_has_location (stmt))
2777 input_location = gimple_location (stmt);
2779 /* Check the OpenMP nesting restrictions. */
2780 bool remove = false;
2781 if (is_gimple_omp (stmt))
2782 remove = !check_omp_nesting_restrictions (stmt, ctx);
2783 else if (is_gimple_call (stmt))
2785 tree fndecl = gimple_call_fndecl (stmt);
2786 if (fndecl)
2788 if (setjmp_or_longjmp_p (fndecl)
2789 && ctx
2790 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2791 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2793 remove = true;
2794 error_at (gimple_location (stmt),
2795 "setjmp/longjmp inside simd construct");
2797 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2798 switch (DECL_FUNCTION_CODE (fndecl))
2800 case BUILT_IN_GOMP_BARRIER:
2801 case BUILT_IN_GOMP_CANCEL:
2802 case BUILT_IN_GOMP_CANCELLATION_POINT:
2803 case BUILT_IN_GOMP_TASKYIELD:
2804 case BUILT_IN_GOMP_TASKWAIT:
2805 case BUILT_IN_GOMP_TASKGROUP_START:
2806 case BUILT_IN_GOMP_TASKGROUP_END:
2807 remove = !check_omp_nesting_restrictions (stmt, ctx);
2808 break;
2809 default:
2810 break;
2814 if (remove)
2816 stmt = gimple_build_nop ();
2817 gsi_replace (gsi, stmt, false);
2820 *handled_ops_p = true;
2822 switch (gimple_code (stmt))
2824 case GIMPLE_OMP_PARALLEL:
2825 taskreg_nesting_level++;
2826 scan_omp_parallel (gsi, ctx);
2827 taskreg_nesting_level--;
2828 break;
2830 case GIMPLE_OMP_TASK:
2831 taskreg_nesting_level++;
2832 scan_omp_task (gsi, ctx);
2833 taskreg_nesting_level--;
2834 break;
2836 case GIMPLE_OMP_FOR:
2837 scan_omp_for (as_a <gomp_for *> (stmt), ctx);
2838 break;
2840 case GIMPLE_OMP_SECTIONS:
2841 scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
2842 break;
2844 case GIMPLE_OMP_SINGLE:
2845 scan_omp_single (as_a <gomp_single *> (stmt), ctx);
2846 break;
2848 case GIMPLE_OMP_SECTION:
2849 case GIMPLE_OMP_MASTER:
2850 case GIMPLE_OMP_TASKGROUP:
2851 case GIMPLE_OMP_ORDERED:
2852 case GIMPLE_OMP_CRITICAL:
2853 ctx = new_omp_context (stmt, ctx);
2854 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2855 break;
2857 case GIMPLE_OMP_TARGET:
2858 scan_omp_target (as_a <gomp_target *> (stmt), ctx);
2859 break;
2861 case GIMPLE_OMP_TEAMS:
2862 scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
2863 break;
2865 case GIMPLE_BIND:
2867 tree var;
2869 *handled_ops_p = false;
2870 if (ctx)
2871 for (var = gimple_bind_vars (as_a <gbind *> (stmt));
2872 var ;
2873 var = DECL_CHAIN (var))
2874 insert_decl_map (&ctx->cb, var, var);
2876 break;
2877 default:
2878 *handled_ops_p = false;
2879 break;
2882 return NULL_TREE;
2886 /* Scan all the statements starting at the current statement. CTX
2887 contains context information about the OpenMP directives and
2888 clauses found during the scan. */
2890 static void
2891 scan_omp (gimple_seq *body_p, omp_context *ctx)
2893 location_t saved_location;
2894 struct walk_stmt_info wi;
2896 memset (&wi, 0, sizeof (wi));
2897 wi.info = ctx;
2898 wi.want_locations = true;
2900 saved_location = input_location;
2901 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2902 input_location = saved_location;
2905 /* Re-gimplification and code generation routines. */
2907 /* Build a call to GOMP_barrier. */
2909 static gimple
2910 build_omp_barrier (tree lhs)
2912 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2913 : BUILT_IN_GOMP_BARRIER);
2914 gcall *g = gimple_build_call (fndecl, 0);
2915 if (lhs)
2916 gimple_call_set_lhs (g, lhs);
2917 return g;
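/* The call built here is GOMP_barrier (), or GOMP_barrier_cancel ()
   returning a bool when LHS is given (an assumption about libgomp's
   entry points, consistent with the builtins used above).  */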
2920 /* If a context was created for STMT when it was scanned, return it. */
2922 static omp_context *
2923 maybe_lookup_ctx (gimple stmt)
2925 splay_tree_node n;
2926 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2927 return n ? (omp_context *) n->value : NULL;
2931 /* Find the mapping for DECL in CTX or the immediately enclosing
2932 context that has a mapping for DECL.
2934 If CTX is a nested parallel directive, we may have to use the decl
2935 mappings created in CTX's parent context. Suppose that we have the
2936 following parallel nesting (variable UIDs shown for clarity):
2938 iD.1562 = 0;
2939 #omp parallel shared(iD.1562) -> outer parallel
2940 iD.1562 = iD.1562 + 1;
2942 #omp parallel shared (iD.1562) -> inner parallel
2943 iD.1562 = iD.1562 - 1;
2945 Each parallel structure will create a distinct .omp_data_s structure
2946 for copying iD.1562 in/out of the directive:
2948 outer parallel .omp_data_s.1.i -> iD.1562
2949 inner parallel .omp_data_s.2.i -> iD.1562
2951 A shared variable mapping will produce a copy-out operation before
2952 the parallel directive and a copy-in operation after it. So, in
2953 this case we would have:
2955 iD.1562 = 0;
2956 .omp_data_o.1.i = iD.1562;
2957 #omp parallel shared(iD.1562) -> outer parallel
2958 .omp_data_i.1 = &.omp_data_o.1
2959 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2961 .omp_data_o.2.i = iD.1562; -> **
2962 #omp parallel shared(iD.1562) -> inner parallel
2963 .omp_data_i.2 = &.omp_data_o.2
2964 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2967 ** This is a problem. The symbol iD.1562 cannot be referenced
2968 inside the body of the outer parallel region. But since we are
2969 emitting this copy operation while expanding the inner parallel
2970 directive, we need to access the CTX structure of the outer
2971 parallel directive to get the correct mapping:
2973 .omp_data_o.2.i = .omp_data_i.1->i
2975 Since there may be other workshare or parallel directives enclosing
2976 the parallel directive, it may be necessary to walk up the context
2977 parent chain. This is not a problem in general because nested
2978 parallelism happens only rarely. */
2980 static tree
2981 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2983 tree t;
2984 omp_context *up;
2986 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2987 t = maybe_lookup_decl (decl, up);
2989 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2991 return t ? t : decl;
2995 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2996 in outer contexts. */
2998 static tree
2999 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3001 tree t = NULL;
3002 omp_context *up;
3004 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3005 t = maybe_lookup_decl (decl, up);
3007 return t ? t : decl;
3011 /* Construct the initialization value for reduction CLAUSE. */
3013 tree
3014 omp_reduction_init (tree clause, tree type)
3016 location_t loc = OMP_CLAUSE_LOCATION (clause);
3017 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
3019 case PLUS_EXPR:
3020 case MINUS_EXPR:
3021 case BIT_IOR_EXPR:
3022 case BIT_XOR_EXPR:
3023 case TRUTH_OR_EXPR:
3024 case TRUTH_ORIF_EXPR:
3025 case TRUTH_XOR_EXPR:
3026 case NE_EXPR:
3027 return build_zero_cst (type);
3029 case MULT_EXPR:
3030 case TRUTH_AND_EXPR:
3031 case TRUTH_ANDIF_EXPR:
3032 case EQ_EXPR:
3033 return fold_convert_loc (loc, type, integer_one_node);
3035 case BIT_AND_EXPR:
3036 return fold_convert_loc (loc, type, integer_minus_one_node);
3038 case MAX_EXPR:
3039 if (SCALAR_FLOAT_TYPE_P (type))
3041 REAL_VALUE_TYPE max, min;
3042 if (HONOR_INFINITIES (TYPE_MODE (type)))
3044 real_inf (&max);
3045 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3047 else
3048 real_maxval (&min, 1, TYPE_MODE (type));
3049 return build_real (type, min);
3051 else
3053 gcc_assert (INTEGRAL_TYPE_P (type));
3054 return TYPE_MIN_VALUE (type);
3057 case MIN_EXPR:
3058 if (SCALAR_FLOAT_TYPE_P (type))
3060 REAL_VALUE_TYPE max;
3061 if (HONOR_INFINITIES (TYPE_MODE (type)))
3062 real_inf (&max);
3063 else
3064 real_maxval (&max, 0, TYPE_MODE (type));
3065 return build_real (type, max);
3067 else
3069 gcc_assert (INTEGRAL_TYPE_P (type));
3070 return TYPE_MAX_VALUE (type);
3073 default:
3074 gcc_unreachable ();
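/* For example (illustrative): reduction(+:x) and reduction(|:x)
   initialize the private copy to 0, reduction(*:x) and reduction(&&:x)
   to 1, reduction(&:x) to ~0, and reduction(max:x) to the minimum
   representable value of the type (-inf for floats when infinities are
   honored).  */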
3078 /* Return alignment to be assumed for var in CLAUSE, which should be
3079 OMP_CLAUSE_ALIGNED. */
3081 static tree
3082 omp_clause_aligned_alignment (tree clause)
3084 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3085 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3087 /* Otherwise return the implementation-defined alignment. */
3088 unsigned int al = 1;
3089 machine_mode mode, vmode;
3090 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3091 if (vs)
3092 vs = 1 << floor_log2 (vs);
3093 static enum mode_class classes[]
3094 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3095 for (int i = 0; i < 4; i += 2)
3096 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3097 mode != VOIDmode;
3098 mode = GET_MODE_WIDER_MODE (mode))
3100 vmode = targetm.vectorize.preferred_simd_mode (mode);
3101 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3102 continue;
3103 while (vs
3104 && GET_MODE_SIZE (vmode) < vs
3105 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3106 vmode = GET_MODE_2XWIDER_MODE (vmode);
3108 tree type = lang_hooks.types.type_for_mode (mode, 1);
3109 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3110 continue;
3111 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3112 / GET_MODE_SIZE (mode));
3113 if (TYPE_MODE (type) != vmode)
3114 continue;
3115 if (TYPE_ALIGN_UNIT (type) > al)
3116 al = TYPE_ALIGN_UNIT (type);
3118 return build_int_cst (integer_type_node, al);
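/* E.g. (target-dependent, stated here only as an assumption): on an
   x86_64 target with AVX enabled, the widest preferred vector mode is
   32 bytes, so a bare aligned clause without an alignment expression
   would assume 32-byte alignment.  */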
3121 /* Return maximum possible vectorization factor for the target. */
3123 static int
3124 omp_max_vf (void)
3126 if (!optimize
3127 || optimize_debug
3128 || !flag_tree_loop_optimize
3129 || (!flag_tree_loop_vectorize
3130 && (global_options_set.x_flag_tree_loop_vectorize
3131 || global_options_set.x_flag_tree_vectorize)))
3132 return 1;
3134 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3135 if (vs)
3137 vs = 1 << floor_log2 (vs);
3138 return vs;
3140 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3141 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3142 return GET_MODE_NUNITS (vqimode);
3143 return 1;
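/* E.g. (again target-dependent): with vectorization enabled on a plain
   SSE2 x86_64 target, the preferred QImode vector mode is V16QImode,
   giving a maximum vectorization factor of 16.  */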
3146 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3147 privatization. */
3149 static bool
3150 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3151 tree &idx, tree &lane, tree &ivar, tree &lvar)
3153 if (max_vf == 0)
3155 max_vf = omp_max_vf ();
3156 if (max_vf > 1)
3158 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3159 OMP_CLAUSE_SAFELEN);
3160 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3161 max_vf = 1;
3162 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3163 max_vf) == -1)
3164 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3166 if (max_vf > 1)
3168 idx = create_tmp_var (unsigned_type_node);
3169 lane = create_tmp_var (unsigned_type_node);
3172 if (max_vf == 1)
3173 return false;
3175 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3176 tree avar = create_tmp_var_raw (atype);
3177 if (TREE_ADDRESSABLE (new_var))
3178 TREE_ADDRESSABLE (avar) = 1;
3179 DECL_ATTRIBUTES (avar)
3180 = tree_cons (get_identifier ("omp simd array"), NULL,
3181 DECL_ATTRIBUTES (avar));
3182 gimple_add_tmp_var (avar);
3183 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3184 NULL_TREE, NULL_TREE);
3185 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3186 NULL_TREE, NULL_TREE);
3187 if (DECL_P (new_var))
3189 SET_DECL_VALUE_EXPR (new_var, lvar);
3190 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3192 return true;
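/* A sketch of the effect (names illustrative): a privatized scalar D in
   a simd loop becomes an "omp simd array" D_arr[max_vf]; the loop body
   uses D_arr[idx] (IVAR) while the sequential value lives in D_arr[lane]
   (LVAR), with lane filled in by IFN_GOMP_SIMD_LANE at lowering time.  */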
3195 /* Helper function of lower_rec_input_clauses. For a reference
3196 in a simd reduction, add an underlying variable that it will reference. */
3198 static void
3199 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3201 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3202 if (TREE_CONSTANT (z))
3204 const char *name = NULL;
3205 if (DECL_NAME (new_vard))
3206 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3208 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3209 gimple_add_tmp_var (z);
3210 TREE_ADDRESSABLE (z) = 1;
3211 z = build_fold_addr_expr_loc (loc, z);
3212 gimplify_assign (new_vard, z, ilist);
3216 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3217 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3218 private variables. Initialization statements go in ILIST, while calls
3219 to destructors go in DLIST. */
3221 static void
3222 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3223 omp_context *ctx, struct omp_for_data *fd)
3225 tree c, dtor, copyin_seq, x, ptr;
3226 bool copyin_by_ref = false;
3227 bool lastprivate_firstprivate = false;
3228 bool reduction_omp_orig_ref = false;
3229 int pass;
3230 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3231 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3232 int max_vf = 0;
3233 tree lane = NULL_TREE, idx = NULL_TREE;
3234 tree ivar = NULL_TREE, lvar = NULL_TREE;
3235 gimple_seq llist[2] = { NULL, NULL };
3237 copyin_seq = NULL;
3239 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3240 with data sharing clauses referencing variable sized vars. That
3241 is unnecessarily hard to support and very unlikely to result in
3242 vectorized code anyway. */
3243 if (is_simd)
3244 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3245 switch (OMP_CLAUSE_CODE (c))
3247 case OMP_CLAUSE_LINEAR:
3248 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3249 max_vf = 1;
3250 /* FALLTHRU */
3251 case OMP_CLAUSE_REDUCTION:
3252 case OMP_CLAUSE_PRIVATE:
3253 case OMP_CLAUSE_FIRSTPRIVATE:
3254 case OMP_CLAUSE_LASTPRIVATE:
3255 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3256 max_vf = 1;
3257 break;
3258 default:
3259 continue;
3262 /* Do all the fixed sized types in the first pass, and the variable sized
3263 types in the second pass. This makes sure that the scalar arguments to
3264 the variable sized types are processed before we use them in the
3265 variable sized operations. */
3266 for (pass = 0; pass < 2; ++pass)
3268 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3270 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3271 tree var, new_var;
3272 bool by_ref;
3273 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3275 switch (c_kind)
3277 case OMP_CLAUSE_PRIVATE:
3278 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3279 continue;
3280 break;
3281 case OMP_CLAUSE_SHARED:
3282 /* Ignore shared directives in teams construct. */
3283 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3284 continue;
3285 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3287 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3288 continue;
3290 case OMP_CLAUSE_FIRSTPRIVATE:
3291 case OMP_CLAUSE_COPYIN:
3292 case OMP_CLAUSE_LINEAR:
3293 break;
3294 case OMP_CLAUSE_REDUCTION:
3295 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3296 reduction_omp_orig_ref = true;
3297 break;
3298 case OMP_CLAUSE__LOOPTEMP_:
3299 /* Handle _looptemp_ clauses only on parallel. */
3300 if (fd)
3301 continue;
3302 break;
3303 case OMP_CLAUSE_LASTPRIVATE:
3304 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3306 lastprivate_firstprivate = true;
3307 if (pass != 0)
3308 continue;
3310 /* Even without corresponding firstprivate, if
3311 decl is Fortran allocatable, it needs outer var
3312 reference. */
3313 else if (pass == 0
3314 && lang_hooks.decls.omp_private_outer_ref
3315 (OMP_CLAUSE_DECL (c)))
3316 lastprivate_firstprivate = true;
3317 break;
3318 case OMP_CLAUSE_ALIGNED:
3319 if (pass == 0)
3320 continue;
3321 var = OMP_CLAUSE_DECL (c);
3322 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3323 && !is_global_var (var))
3325 new_var = maybe_lookup_decl (var, ctx);
3326 if (new_var == NULL_TREE)
3327 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3328 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3329 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3330 omp_clause_aligned_alignment (c));
3331 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3332 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3333 gimplify_and_add (x, ilist);
3335 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3336 && is_global_var (var))
3338 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3339 new_var = lookup_decl (var, ctx);
3340 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3341 t = build_fold_addr_expr_loc (clause_loc, t);
3342 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3343 t = build_call_expr_loc (clause_loc, t2, 2, t,
3344 omp_clause_aligned_alignment (c));
3345 t = fold_convert_loc (clause_loc, ptype, t);
3346 x = create_tmp_var (ptype);
3347 t = build2 (MODIFY_EXPR, ptype, x, t);
3348 gimplify_and_add (t, ilist);
3349 t = build_simple_mem_ref_loc (clause_loc, x);
3350 SET_DECL_VALUE_EXPR (new_var, t);
3351 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3353 continue;
3354 default:
3355 continue;
3358 new_var = var = OMP_CLAUSE_DECL (c);
3359 if (c_kind != OMP_CLAUSE_COPYIN)
3360 new_var = lookup_decl (var, ctx);
3362 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3364 if (pass != 0)
3365 continue;
3367 else if (is_variable_sized (var))
3369 /* For variable sized types, we need to allocate the
3370 actual storage here. Call alloca and store the
3371 result in the pointer decl that we created elsewhere. */
3372 if (pass == 0)
3373 continue;
3375 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3377 gcall *stmt;
3378 tree tmp, atmp;
3380 ptr = DECL_VALUE_EXPR (new_var);
3381 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3382 ptr = TREE_OPERAND (ptr, 0);
3383 gcc_assert (DECL_P (ptr));
3384 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3386 /* void *tmp = __builtin_alloca (x); */
3387 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3388 stmt = gimple_build_call (atmp, 1, x);
3389 tmp = create_tmp_var_raw (ptr_type_node);
3390 gimple_add_tmp_var (tmp);
3391 gimple_call_set_lhs (stmt, tmp);
3393 gimple_seq_add_stmt (ilist, stmt);
3395 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3396 gimplify_assign (ptr, x, ilist);
3399 else if (is_reference (var))
3401 /* For references that are being privatized for Fortran,
3402 allocate new backing storage for the new pointer
3403 variable. This allows us to avoid changing all the
3404 code that expects a pointer to something that expects
3405 a direct variable. */
3406 if (pass == 0)
3407 continue;
3409 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3410 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3412 x = build_receiver_ref (var, false, ctx);
3413 x = build_fold_addr_expr_loc (clause_loc, x);
3415 else if (TREE_CONSTANT (x))
3417 /* For a reduction in a SIMD loop, defer adding the
3418 initialization of the reference, because if we decide
3419 to use a SIMD array for it, the initialization could cause
3420 an expansion ICE. */
3421 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3422 x = NULL_TREE;
3423 else
3425 const char *name = NULL;
3426 if (DECL_NAME (var))
3427 name = IDENTIFIER_POINTER (DECL_NAME (var));
3429 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3430 name);
3431 gimple_add_tmp_var (x);
3432 TREE_ADDRESSABLE (x) = 1;
3433 x = build_fold_addr_expr_loc (clause_loc, x);
3436 else
3438 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3439 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3442 if (x)
3444 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3445 gimplify_assign (new_var, x, ilist);
3448 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3450 else if (c_kind == OMP_CLAUSE_REDUCTION
3451 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3453 if (pass == 0)
3454 continue;
3456 else if (pass != 0)
3457 continue;
3459 switch (OMP_CLAUSE_CODE (c))
3461 case OMP_CLAUSE_SHARED:
3462 /* Ignore shared directives in teams construct. */
3463 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3464 continue;
3465 /* Shared global vars are just accessed directly. */
3466 if (is_global_var (new_var))
3467 break;
3468 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3469 needs to be delayed until after fixup_child_record_type so
3470 that we get the correct type during the dereference. */
3471 by_ref = use_pointer_for_field (var, ctx);
3472 x = build_receiver_ref (var, by_ref, ctx);
3473 SET_DECL_VALUE_EXPR (new_var, x);
3474 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3476 /* ??? If VAR is not passed by reference, and the variable
3477 hasn't been initialized yet, then we'll get a warning for
3478 the store into the omp_data_s structure. Ideally, we'd be
3479 able to notice this and not store anything at all, but
3480 we're generating code too early. Suppress the warning. */
3481 if (!by_ref)
3482 TREE_NO_WARNING (var) = 1;
3483 break;
3485 case OMP_CLAUSE_LASTPRIVATE:
3486 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3487 break;
3488 /* FALLTHRU */
3490 case OMP_CLAUSE_PRIVATE:
3491 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3492 x = build_outer_var_ref (var, ctx);
3493 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3495 if (is_task_ctx (ctx))
3496 x = build_receiver_ref (var, false, ctx);
3497 else
3498 x = build_outer_var_ref (var, ctx);
3500 else
3501 x = NULL;
3502 do_private:
3503 tree nx;
3504 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3505 if (is_simd)
3507 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3508 if ((TREE_ADDRESSABLE (new_var) || nx || y
3509 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3510 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3511 idx, lane, ivar, lvar))
3513 if (nx)
3514 x = lang_hooks.decls.omp_clause_default_ctor
3515 (c, unshare_expr (ivar), x);
3516 if (nx && x)
3517 gimplify_and_add (x, &llist[0]);
3518 if (y)
3520 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3521 if (y)
3523 gimple_seq tseq = NULL;
3525 dtor = y;
3526 gimplify_stmt (&dtor, &tseq);
3527 gimple_seq_add_seq (&llist[1], tseq);
3530 break;
3533 if (nx)
3534 gimplify_and_add (nx, ilist);
3535 /* FALLTHRU */
3537 do_dtor:
3538 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3539 if (x)
3541 gimple_seq tseq = NULL;
3543 dtor = x;
3544 gimplify_stmt (&dtor, &tseq);
3545 gimple_seq_add_seq (dlist, tseq);
3547 break;
3549 case OMP_CLAUSE_LINEAR:
3550 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3551 goto do_firstprivate;
3552 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3553 x = NULL;
3554 else
3555 x = build_outer_var_ref (var, ctx);
3556 goto do_private;
3558 case OMP_CLAUSE_FIRSTPRIVATE:
3559 if (is_task_ctx (ctx))
3561 if (is_reference (var) || is_variable_sized (var))
3562 goto do_dtor;
3563 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3564 ctx))
3565 || use_pointer_for_field (var, NULL))
3567 x = build_receiver_ref (var, false, ctx);
3568 SET_DECL_VALUE_EXPR (new_var, x);
3569 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3570 goto do_dtor;
3573 do_firstprivate:
3574 x = build_outer_var_ref (var, ctx);
3575 if (is_simd)
3577 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3578 && gimple_omp_for_combined_into_p (ctx->stmt))
3580 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3581 tree stept = TREE_TYPE (t);
3582 tree ct = find_omp_clause (clauses,
3583 OMP_CLAUSE__LOOPTEMP_);
3584 gcc_assert (ct);
3585 tree l = OMP_CLAUSE_DECL (ct);
3586 tree n1 = fd->loop.n1;
3587 tree step = fd->loop.step;
3588 tree itype = TREE_TYPE (l);
3589 if (POINTER_TYPE_P (itype))
3590 itype = signed_type_for (itype);
3591 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3592 if (TYPE_UNSIGNED (itype)
3593 && fd->loop.cond_code == GT_EXPR)
3594 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3595 fold_build1 (NEGATE_EXPR, itype, l),
3596 fold_build1 (NEGATE_EXPR,
3597 itype, step));
3598 else
3599 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3600 t = fold_build2 (MULT_EXPR, stept,
3601 fold_convert (stept, l), t);
3603 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3605 x = lang_hooks.decls.omp_clause_linear_ctor
3606 (c, new_var, x, t);
3607 gimplify_and_add (x, ilist);
3608 goto do_dtor;
3611 if (POINTER_TYPE_P (TREE_TYPE (x)))
3612 x = fold_build2 (POINTER_PLUS_EXPR,
3613 TREE_TYPE (x), x, t);
3614 else
3615 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3618 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3619 || TREE_ADDRESSABLE (new_var))
3620 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3621 idx, lane, ivar, lvar))
3623 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3625 tree iv = create_tmp_var (TREE_TYPE (new_var));
3626 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3627 gimplify_and_add (x, ilist);
3628 gimple_stmt_iterator gsi
3629 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3630 gassign *g
3631 = gimple_build_assign (unshare_expr (lvar), iv);
3632 gsi_insert_before_without_update (&gsi, g,
3633 GSI_SAME_STMT);
3634 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3635 enum tree_code code = PLUS_EXPR;
3636 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3637 code = POINTER_PLUS_EXPR;
3638 g = gimple_build_assign (iv, code, iv, t);
3639 gsi_insert_before_without_update (&gsi, g,
3640 GSI_SAME_STMT);
3641 break;
3643 x = lang_hooks.decls.omp_clause_copy_ctor
3644 (c, unshare_expr (ivar), x);
3645 gimplify_and_add (x, &llist[0]);
3646 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3647 if (x)
3649 gimple_seq tseq = NULL;
3651 dtor = x;
3652 gimplify_stmt (&dtor, &tseq);
3653 gimple_seq_add_seq (&llist[1], tseq);
3655 break;
3658 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3659 gimplify_and_add (x, ilist);
3660 goto do_dtor;
3662 case OMP_CLAUSE__LOOPTEMP_:
3663 gcc_assert (is_parallel_ctx (ctx));
3664 x = build_outer_var_ref (var, ctx);
3665 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3666 gimplify_and_add (x, ilist);
3667 break;
3669 case OMP_CLAUSE_COPYIN:
3670 by_ref = use_pointer_for_field (var, NULL);
3671 x = build_receiver_ref (var, by_ref, ctx);
3672 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3673 append_to_statement_list (x, &copyin_seq);
3674 copyin_by_ref |= by_ref;
3675 break;
3677 case OMP_CLAUSE_REDUCTION:
3678 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3680 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3681 gimple tseq;
3682 x = build_outer_var_ref (var, ctx);
3684 if (is_reference (var)
3685 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3686 TREE_TYPE (x)))
3687 x = build_fold_addr_expr_loc (clause_loc, x);
3688 SET_DECL_VALUE_EXPR (placeholder, x);
3689 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3690 tree new_vard = new_var;
3691 if (is_reference (var))
3693 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3694 new_vard = TREE_OPERAND (new_var, 0);
3695 gcc_assert (DECL_P (new_vard));
3697 if (is_simd
3698 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3699 idx, lane, ivar, lvar))
3701 if (new_vard == new_var)
3703 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3704 SET_DECL_VALUE_EXPR (new_var, ivar);
3706 else
3708 SET_DECL_VALUE_EXPR (new_vard,
3709 build_fold_addr_expr (ivar));
3710 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3712 x = lang_hooks.decls.omp_clause_default_ctor
3713 (c, unshare_expr (ivar),
3714 build_outer_var_ref (var, ctx));
3715 if (x)
3716 gimplify_and_add (x, &llist[0]);
3717 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3719 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3720 lower_omp (&tseq, ctx);
3721 gimple_seq_add_seq (&llist[0], tseq);
3723 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3724 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3725 lower_omp (&tseq, ctx);
3726 gimple_seq_add_seq (&llist[1], tseq);
3727 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3728 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3729 if (new_vard == new_var)
3730 SET_DECL_VALUE_EXPR (new_var, lvar);
3731 else
3732 SET_DECL_VALUE_EXPR (new_vard,
3733 build_fold_addr_expr (lvar));
3734 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3735 if (x)
3737 tseq = NULL;
3738 dtor = x;
3739 gimplify_stmt (&dtor, &tseq);
3740 gimple_seq_add_seq (&llist[1], tseq);
3742 break;
3744 /* If this is a reference to a constant-size reduction var
3745 with a placeholder, we haven't emitted the initializer
3746 for it because it is undesirable if SIMD arrays are used.
3747 But if they aren't used, we need to emit the deferred
3748 initialization now. */
3749 else if (is_reference (var) && is_simd)
3750 handle_simd_reference (clause_loc, new_vard, ilist);
3751 x = lang_hooks.decls.omp_clause_default_ctor
3752 (c, unshare_expr (new_var),
3753 build_outer_var_ref (var, ctx));
3754 if (x)
3755 gimplify_and_add (x, ilist);
3756 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3758 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3759 lower_omp (&tseq, ctx);
3760 gimple_seq_add_seq (ilist, tseq);
3762 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3763 if (is_simd)
3765 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3766 lower_omp (&tseq, ctx);
3767 gimple_seq_add_seq (dlist, tseq);
3768 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3770 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3771 goto do_dtor;
3773 else
3775 x = omp_reduction_init (c, TREE_TYPE (new_var));
3776 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3777 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3779 /* reduction(-:var) sums up the partial results, so it
3780 acts identically to reduction(+:var). */
3781 if (code == MINUS_EXPR)
3782 code = PLUS_EXPR;
3784 tree new_vard = new_var;
3785 if (is_simd && is_reference (var))
3787 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3788 new_vard = TREE_OPERAND (new_var, 0);
3789 gcc_assert (DECL_P (new_vard));
3791 if (is_simd
3792 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3793 idx, lane, ivar, lvar))
3795 tree ref = build_outer_var_ref (var, ctx);
3797 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3799 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3800 ref = build_outer_var_ref (var, ctx);
3801 gimplify_assign (ref, x, &llist[1]);
3803 if (new_vard != new_var)
3805 SET_DECL_VALUE_EXPR (new_vard,
3806 build_fold_addr_expr (lvar));
3807 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3810 else
3812 if (is_reference (var) && is_simd)
3813 handle_simd_reference (clause_loc, new_vard, ilist);
3814 gimplify_assign (new_var, x, ilist);
3815 if (is_simd)
3817 tree ref = build_outer_var_ref (var, ctx);
3819 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3820 ref = build_outer_var_ref (var, ctx);
3821 gimplify_assign (ref, x, dlist);
3825 break;
3827 default:
3828 gcc_unreachable ();
3833 if (lane)
3835 tree uid = create_tmp_var (ptr_type_node, "simduid");
3836 /* We don't want uninitialized warnings on simduid; it is always
3837 uninitialized, since we use it only for its DECL_UID, not for its value. */
3838 TREE_NO_WARNING (uid) = 1;
3839 gimple g
3840 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3841 gimple_call_set_lhs (g, lane);
3842 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3843 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3844 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3845 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3846 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3847 gimple_omp_for_set_clauses (ctx->stmt, c);
3848 g = gimple_build_assign (lane, INTEGER_CST,
3849 build_int_cst (unsigned_type_node, 0));
3850 gimple_seq_add_stmt (ilist, g);
3851 for (int i = 0; i < 2; i++)
3852 if (llist[i])
3854 tree vf = create_tmp_var (unsigned_type_node);
3855 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3856 gimple_call_set_lhs (g, vf);
3857 gimple_seq *seq = i == 0 ? ilist : dlist;
3858 gimple_seq_add_stmt (seq, g);
3859 tree t = build_int_cst (unsigned_type_node, 0);
3860 g = gimple_build_assign (idx, INTEGER_CST, t);
3861 gimple_seq_add_stmt (seq, g);
3862 tree body = create_artificial_label (UNKNOWN_LOCATION);
3863 tree header = create_artificial_label (UNKNOWN_LOCATION);
3864 tree end = create_artificial_label (UNKNOWN_LOCATION);
3865 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3866 gimple_seq_add_stmt (seq, gimple_build_label (body));
3867 gimple_seq_add_seq (seq, llist[i]);
3868 t = build_int_cst (unsigned_type_node, 1);
3869 g = gimple_build_assign (idx, PLUS_EXPR, idx, t);
3870 gimple_seq_add_stmt (seq, g);
3871 gimple_seq_add_stmt (seq, gimple_build_label (header));
3872 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3873 gimple_seq_add_stmt (seq, g);
3874 gimple_seq_add_stmt (seq, gimple_build_label (end));
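	    /* The sequence built above is roughly (illustrative):

		 idx = 0;
		 goto header;
	       body:
		 <llist[i] statements for simd array element idx>;
		 idx = idx + 1;
	       header:
		 if (idx < vf) goto body; else goto end;
	       end:;  */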
3878 /* The copyin sequence is not to be executed by the main thread, since
3879 that would result in self-copies. Perhaps not a visible problem for
3880 scalars, but it certainly is for C++ operator=. */
3881 if (copyin_seq)
3883       x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3884 			   0);
3885 x = build2 (NE_EXPR, boolean_type_node, x,
3886 build_int_cst (TREE_TYPE (x), 0));
3887 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3888 gimplify_and_add (x, ilist);
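      /* Conceptually, the guard built above is

	   if (__builtin_omp_get_thread_num () != 0)
	     { copyin_seq }

	 so only the non-master threads copy the master's values in.  */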
3891 /* If any copyin variable is passed by reference, we must ensure the
3892 master thread doesn't modify it before it is copied over in all
3893 threads. Similarly for variables in both firstprivate and
3894 lastprivate clauses we need to ensure the lastprivate copying
3895 happens after firstprivate copying in all threads. And similarly
3896      for UDRs if the initializer expression refers to omp_orig.  */
3897 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3899 /* Don't add any barrier for #pragma omp simd or
3900 #pragma omp distribute. */
3901 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3902 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3903 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3906 /* If max_vf is non-zero, then we can use only a vectorization factor
3907 up to the max_vf we chose. So stick it into the safelen clause. */
3908 if (max_vf)
3910 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3911 OMP_CLAUSE_SAFELEN);
3912 if (c == NULL_TREE
3913 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
3914 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3915 max_vf) == 1))
3917 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3918 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3919 max_vf);
3920 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3921 gimple_omp_for_set_clauses (ctx->stmt, c);
3927 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3928 both parallel and workshare constructs. PREDICATE may be NULL if it's
3929 always true. */
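/* For illustration (a sketch with illustrative names, not the literal
   generated GIMPLE): for

     #pragma omp for lastprivate (x)

   the sequence built here amounts to

     if (<this thread ran the sequentially last iteration>)
       x = x_priv;

   i.e. only one thread writes its private value back to the original
   variable.  */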
3931 static void
3932 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3933 omp_context *ctx)
3935 tree x, c, label = NULL, orig_clauses = clauses;
3936 bool par_clauses = false;
3937 tree simduid = NULL, lastlane = NULL;
3939 /* Early exit if there are no lastprivate or linear clauses. */
3940 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3941 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3942 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3943 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3944 break;
3945 if (clauses == NULL)
3947 /* If this was a workshare clause, see if it had been combined
3948 with its parallel. In that case, look for the clauses on the
3949 parallel statement itself. */
3950 if (is_parallel_ctx (ctx))
3951 return;
3953 ctx = ctx->outer;
3954 if (ctx == NULL || !is_parallel_ctx (ctx))
3955 return;
3957 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3958 OMP_CLAUSE_LASTPRIVATE);
3959 if (clauses == NULL)
3960 return;
3961 par_clauses = true;
3964 if (predicate)
3966 gcond *stmt;
3967 tree label_true, arm1, arm2;
3969 label = create_artificial_label (UNKNOWN_LOCATION);
3970 label_true = create_artificial_label (UNKNOWN_LOCATION);
3971 arm1 = TREE_OPERAND (predicate, 0);
3972 arm2 = TREE_OPERAND (predicate, 1);
3973 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3974 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3975 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3976 label_true, label);
3977 gimple_seq_add_stmt (stmt_list, stmt);
3978 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3981 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3982 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3984 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3985 if (simduid)
3986 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3989 for (c = clauses; c ;)
3991 tree var, new_var;
3992 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3994 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3995 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3996 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3998 var = OMP_CLAUSE_DECL (c);
3999 new_var = lookup_decl (var, ctx);
4001 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
4003 tree val = DECL_VALUE_EXPR (new_var);
4004 if (TREE_CODE (val) == ARRAY_REF
4005 && VAR_P (TREE_OPERAND (val, 0))
4006 && lookup_attribute ("omp simd array",
4007 DECL_ATTRIBUTES (TREE_OPERAND (val,
4008 0))))
4010 if (lastlane == NULL)
4012 lastlane = create_tmp_var (unsigned_type_node);
4013 gcall *g
4014 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
4015 2, simduid,
4016 TREE_OPERAND (val, 1));
4017 gimple_call_set_lhs (g, lastlane);
4018 gimple_seq_add_stmt (stmt_list, g);
4020 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
4021 TREE_OPERAND (val, 0), lastlane,
4022 NULL_TREE, NULL_TREE);
4026 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4027 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
4029 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
4030 gimple_seq_add_seq (stmt_list,
4031 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
4032 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
4034 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4035 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
4037 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
4038 gimple_seq_add_seq (stmt_list,
4039 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
4040 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
4043 x = build_outer_var_ref (var, ctx);
4044 if (is_reference (var))
4045 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4046 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
4047 gimplify_and_add (x, stmt_list);
4049 c = OMP_CLAUSE_CHAIN (c);
4050 if (c == NULL && !par_clauses)
4052 /* If this was a workshare clause, see if it had been combined
4053 with its parallel. In that case, continue looking for the
4054 clauses also on the parallel statement itself. */
4055 if (is_parallel_ctx (ctx))
4056 break;
4058 ctx = ctx->outer;
4059 if (ctx == NULL || !is_parallel_ctx (ctx))
4060 break;
4062 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4063 OMP_CLAUSE_LASTPRIVATE);
4064 par_clauses = true;
4068 if (label)
4069 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
4073 /* Generate code to implement the REDUCTION clauses. */
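/* For illustration (illustrative names): for

     #pragma omp parallel for reduction (+:sum)

   each thread has accumulated into a private copy sum_priv, and the
   merge emitted here is conceptually

     sum = sum + sum_priv;

   performed either as a single atomic update (exactly one scalar
   reduction clause) or between GOMP_atomic_start/GOMP_atomic_end
   calls (several clauses, array reductions or UDRs).  */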
4075 static void
4076 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
4078 gimple_seq sub_seq = NULL;
4079 gimple stmt;
4080 tree x, c;
4081 int count = 0;
4083 /* SIMD reductions are handled in lower_rec_input_clauses. */
4084 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4085 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4086 return;
4088 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4089 update in that case, otherwise use a lock. */
4090 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
4091 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
4093 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4095 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4096 count = -1;
4097 break;
4099 count++;
4102 if (count == 0)
4103 return;
4105 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4107 tree var, ref, new_var;
4108 enum tree_code code;
4109 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4111 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
4112 continue;
4114 var = OMP_CLAUSE_DECL (c);
4115 new_var = lookup_decl (var, ctx);
4116 if (is_reference (var))
4117 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4118 ref = build_outer_var_ref (var, ctx);
4119 code = OMP_CLAUSE_REDUCTION_CODE (c);
4121 /* reduction(-:var) sums up the partial results, so it acts
4122 identically to reduction(+:var). */
4123 if (code == MINUS_EXPR)
4124 code = PLUS_EXPR;
4126 if (count == 1)
4128 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
4130 addr = save_expr (addr);
4131 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
4132 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
4133 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
4134 gimplify_and_add (x, stmt_seqp);
4135 return;
4138 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4140 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4142 if (is_reference (var)
4143 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4144 TREE_TYPE (ref)))
4145 ref = build_fold_addr_expr_loc (clause_loc, ref);
4146 SET_DECL_VALUE_EXPR (placeholder, ref);
4147 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4148 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
4149 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
4150 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4151 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
4153 else
4155 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4156 ref = build_outer_var_ref (var, ctx);
4157 gimplify_assign (ref, x, &sub_seq);
4161   stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4162 			    0);
4163 gimple_seq_add_stmt (stmt_seqp, stmt);
4165 gimple_seq_add_seq (stmt_seqp, sub_seq);
4167   stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4168 			    0);
4169 gimple_seq_add_stmt (stmt_seqp, stmt);
4173 /* Generate code to implement the COPYPRIVATE clauses. */
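/* For illustration: for

     #pragma omp single copyprivate (x)

   the thread that executed the single region stores x (or &x when the
   field is a pointer) into the communication record via SLIST, and
   every other thread loads it back out via RLIST after the broadcast
   performed by the runtime.  */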
4175 static void
4176 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4177 omp_context *ctx)
4179 tree c;
4181 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4183 tree var, new_var, ref, x;
4184 bool by_ref;
4185 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4187 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4188 continue;
4190 var = OMP_CLAUSE_DECL (c);
4191 by_ref = use_pointer_for_field (var, NULL);
4193 ref = build_sender_ref (var, ctx);
4194 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4195 if (by_ref)
4197 x = build_fold_addr_expr_loc (clause_loc, new_var);
4198 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4200 gimplify_assign (ref, x, slist);
4202 ref = build_receiver_ref (var, false, ctx);
4203 if (by_ref)
4205 ref = fold_convert_loc (clause_loc,
4206 build_pointer_type (TREE_TYPE (new_var)),
4207 ref);
4208 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4210 if (is_reference (var))
4212 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4213 ref = build_simple_mem_ref_loc (clause_loc, ref);
4214 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4216 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4217 gimplify_and_add (x, rlist);
4222 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4223 and REDUCTION from the sender (aka parent) side. */
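/* For illustration, for firstprivate (a) the parent-side code emitted
   into ILIST is conceptually

     .omp_data_o.a = a;	    (or  .omp_data_o.a = &a;  if by reference)

   before the runtime call, and for e.g. a scalar reduction the result
   is read back via OLIST afterwards:

     a = .omp_data_o.a;  */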
4225 static void
4226 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4227 omp_context *ctx)
4229 tree c;
4231 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4233 tree val, ref, x, var;
4234 bool by_ref, do_in = false, do_out = false;
4235 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4237 switch (OMP_CLAUSE_CODE (c))
4239 case OMP_CLAUSE_PRIVATE:
4240 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4241 break;
4242 continue;
4243 case OMP_CLAUSE_FIRSTPRIVATE:
4244 case OMP_CLAUSE_COPYIN:
4245 case OMP_CLAUSE_LASTPRIVATE:
4246 case OMP_CLAUSE_REDUCTION:
4247 case OMP_CLAUSE__LOOPTEMP_:
4248 break;
4249 default:
4250 continue;
4253 val = OMP_CLAUSE_DECL (c);
4254 var = lookup_decl_in_outer_ctx (val, ctx);
4256 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4257 && is_global_var (var))
4258 continue;
4259 if (is_variable_sized (val))
4260 continue;
4261 by_ref = use_pointer_for_field (val, NULL);
4263 switch (OMP_CLAUSE_CODE (c))
4265 case OMP_CLAUSE_PRIVATE:
4266 case OMP_CLAUSE_FIRSTPRIVATE:
4267 case OMP_CLAUSE_COPYIN:
4268 case OMP_CLAUSE__LOOPTEMP_:
4269 do_in = true;
4270 break;
4272 case OMP_CLAUSE_LASTPRIVATE:
4273 if (by_ref || is_reference (val))
4275 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4276 continue;
4277 do_in = true;
4279 else
4281 do_out = true;
4282 if (lang_hooks.decls.omp_private_outer_ref (val))
4283 do_in = true;
4285 break;
4287 case OMP_CLAUSE_REDUCTION:
4288 do_in = true;
4289 do_out = !(by_ref || is_reference (val));
4290 break;
4292 default:
4293 gcc_unreachable ();
4296 if (do_in)
4298 ref = build_sender_ref (val, ctx);
4299 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4300 gimplify_assign (ref, x, ilist);
4301 if (is_task_ctx (ctx))
4302 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4305 if (do_out)
4307 ref = build_sender_ref (val, ctx);
4308 gimplify_assign (var, ref, olist);
4313 /* Generate code to implement SHARED from the sender (aka parent)
4314 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4315 list things that got automatically shared. */
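/* For illustration, an automatically shared local V is sent as either

     .omp_data_o.v = v;	    or	   .omp_data_o.v = &v;

   depending on use_pointer_for_field, and in the non-pointer case it
   is reloaded after the region in case the children modified it.  */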
4317 static void
4318 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4320 tree var, ovar, nvar, f, x, record_type;
4322 if (ctx->record_type == NULL)
4323 return;
4325 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4326 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4328 ovar = DECL_ABSTRACT_ORIGIN (f);
4329 nvar = maybe_lookup_decl (ovar, ctx);
4330 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4331 continue;
4333       /* If CTX is a nested parallel directive, find the immediately
4334 enclosing parallel or workshare construct that contains a
4335 mapping for OVAR. */
4336 var = lookup_decl_in_outer_ctx (ovar, ctx);
4338 if (use_pointer_for_field (ovar, ctx))
4340 x = build_sender_ref (ovar, ctx);
4341 var = build_fold_addr_expr (var);
4342 gimplify_assign (x, var, ilist);
4344 else
4346 x = build_sender_ref (ovar, ctx);
4347 gimplify_assign (x, var, ilist);
4349 if (!TREE_READONLY (var)
4350 /* We don't need to receive a new reference to a result
4351 	     or parm decl.  In fact we must not store to it, as that would
4352 	     invalidate any pending RSO (return slot optimization) and
4353 	     generate wrong gimple during inlining.  */
4354 && !((TREE_CODE (var) == RESULT_DECL
4355 || TREE_CODE (var) == PARM_DECL)
4356 && DECL_BY_REFERENCE (var)))
4358 x = build_sender_ref (ovar, ctx);
4359 gimplify_assign (var, x, olist);
4366 /* A convenience function to build an empty GIMPLE_COND with just the
4367 condition. */
4369 static gcond *
4370 gimple_build_cond_empty (tree cond)
4372 enum tree_code pred_code;
4373 tree lhs, rhs;
4375 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4376 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4380 /* Build the function calls to GOMP_parallel_start etc to actually
4381 generate the parallel operation. REGION is the parallel region
4382    being expanded.  BB is the block where the code is to be inserted.  WS_ARGS
4383 will be set if this is a call to a combined parallel+workshare
4384 construct, it contains the list of additional arguments needed by
4385 the workshare construct. */
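/* For illustration (a sketch following libgomp's prototypes): a plain
   #pragma omp parallel expands to roughly

     GOMP_parallel (child_fn, &.omp_data_o, num_threads, flags);

   while a combined parallel loop with a static schedule instead calls

     GOMP_parallel_loop_static (child_fn, &.omp_data_o, num_threads,
				start, end, incr, chunk_size, flags);

   with the extra loop arguments coming from WS_ARGS.  */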
4387 static void
4388 expand_parallel_call (struct omp_region *region, basic_block bb,
4389 gomp_parallel *entry_stmt,
4390 vec<tree, va_gc> *ws_args)
4392 tree t, t1, t2, val, cond, c, clauses, flags;
4393 gimple_stmt_iterator gsi;
4394 gimple stmt;
4395 enum built_in_function start_ix;
4396 int start_ix2;
4397 location_t clause_loc;
4398 vec<tree, va_gc> *args;
4400 clauses = gimple_omp_parallel_clauses (entry_stmt);
4402 /* Determine what flavor of GOMP_parallel we will be
4403 emitting. */
4404 start_ix = BUILT_IN_GOMP_PARALLEL;
4405 if (is_combined_parallel (region))
4407 switch (region->inner->type)
4409 case GIMPLE_OMP_FOR:
4410 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4411 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4412 + (region->inner->sched_kind
4413 == OMP_CLAUSE_SCHEDULE_RUNTIME
4414 ? 3 : region->inner->sched_kind));
4415 start_ix = (enum built_in_function)start_ix2;
4416 break;
4417 case GIMPLE_OMP_SECTIONS:
4418 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4419 break;
4420 default:
4421 gcc_unreachable ();
4425 /* By default, the value of NUM_THREADS is zero (selected at run time)
4426 and there is no conditional. */
4427 cond = NULL_TREE;
4428 val = build_int_cst (unsigned_type_node, 0);
4429 flags = build_int_cst (unsigned_type_node, 0);
4431 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4432 if (c)
4433 cond = OMP_CLAUSE_IF_EXPR (c);
4435 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4436 if (c)
4438 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4439 clause_loc = OMP_CLAUSE_LOCATION (c);
4441 else
4442 clause_loc = gimple_location (entry_stmt);
4444 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4445 if (c)
4446 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4448 /* Ensure 'val' is of the correct type. */
4449 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4451 /* If we found the clause 'if (cond)', build either
4452 (cond != 0) or (cond ? val : 1u). */
4453 if (cond)
4455 cond = gimple_boolify (cond);
4457 if (integer_zerop (val))
4458 val = fold_build2_loc (clause_loc,
4459 EQ_EXPR, unsigned_type_node, cond,
4460 build_int_cst (TREE_TYPE (cond), 0));
4461 else
4463 basic_block cond_bb, then_bb, else_bb;
4464 edge e, e_then, e_else;
4465 tree tmp_then, tmp_else, tmp_join, tmp_var;
4467 tmp_var = create_tmp_var (TREE_TYPE (val));
4468 if (gimple_in_ssa_p (cfun))
4470 tmp_then = make_ssa_name (tmp_var);
4471 tmp_else = make_ssa_name (tmp_var);
4472 tmp_join = make_ssa_name (tmp_var);
4474 else
4476 tmp_then = tmp_var;
4477 tmp_else = tmp_var;
4478 tmp_join = tmp_var;
4481 e = split_block (bb, NULL);
4482 cond_bb = e->src;
4483 bb = e->dest;
4484 remove_edge (e);
4486 then_bb = create_empty_bb (cond_bb);
4487 else_bb = create_empty_bb (then_bb);
4488 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4489 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4491 stmt = gimple_build_cond_empty (cond);
4492 gsi = gsi_start_bb (cond_bb);
4493 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4495 gsi = gsi_start_bb (then_bb);
4496 stmt = gimple_build_assign (tmp_then, val);
4497 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4499 gsi = gsi_start_bb (else_bb);
4500 stmt = gimple_build_assign
4501 (tmp_else, build_int_cst (unsigned_type_node, 1));
4502 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4504 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4505 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4506 add_bb_to_loop (then_bb, cond_bb->loop_father);
4507 add_bb_to_loop (else_bb, cond_bb->loop_father);
4508 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4509 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4511 if (gimple_in_ssa_p (cfun))
4513 gphi *phi = create_phi_node (tmp_join, bb);
4514 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4515 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4518 val = tmp_join;
4521 gsi = gsi_start_bb (bb);
4522 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4523 false, GSI_CONTINUE_LINKING);
4526 gsi = gsi_last_bb (bb);
4527 t = gimple_omp_parallel_data_arg (entry_stmt);
4528 if (t == NULL)
4529 t1 = null_pointer_node;
4530 else
4531 t1 = build_fold_addr_expr (t);
4532 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4534 vec_alloc (args, 4 + vec_safe_length (ws_args));
4535 args->quick_push (t2);
4536 args->quick_push (t1);
4537 args->quick_push (val);
4538 if (ws_args)
4539 args->splice (*ws_args);
4540 args->quick_push (flags);
4542 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4543 builtin_decl_explicit (start_ix), args);
4545 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4546 false, GSI_CONTINUE_LINKING);
4549 /* Insert a call to the function named FUNC_NAME, built with the
4550    information from ENTRY_STMT, into basic block BB.  */
4552 static void
4553 expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
4554 vec <tree, va_gc> *ws_args)
4556 tree t, t1, t2;
4557 gimple_stmt_iterator gsi;
4558 vec <tree, va_gc> *args;
4560 gcc_assert (vec_safe_length (ws_args) == 2);
4561 tree func_name = (*ws_args)[0];
4562 tree grain = (*ws_args)[1];
4564 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
4565 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
4566 gcc_assert (count != NULL_TREE);
4567 count = OMP_CLAUSE_OPERAND (count, 0);
4569 gsi = gsi_last_bb (bb);
4570 t = gimple_omp_parallel_data_arg (entry_stmt);
4571 if (t == NULL)
4572 t1 = null_pointer_node;
4573 else
4574 t1 = build_fold_addr_expr (t);
4575 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4577 vec_alloc (args, 4);
4578 args->quick_push (t2);
4579 args->quick_push (t1);
4580 args->quick_push (count);
4581 args->quick_push (grain);
4582 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
4584 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
4585 GSI_CONTINUE_LINKING);
4588 /* Build the function call to GOMP_task to actually
4589    generate the task operation.  BB is the block where the code is to be
	inserted.  */
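/* The FLAGS argument built below packs the clause bits:
   bit 0 - untied, bit 1 - final, bit 2 - mergeable, bit 3 - depend.
   E.g. an untied mergeable task yields flags == 5.  */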
4591 static void
4592 expand_task_call (basic_block bb, gomp_task *entry_stmt)
4594 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4595 gimple_stmt_iterator gsi;
4596 location_t loc = gimple_location (entry_stmt);
4598 clauses = gimple_omp_task_clauses (entry_stmt);
4600 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4601 if (c)
4602 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4603 else
4604 cond = boolean_true_node;
4606 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4607 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4608 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4609 flags = build_int_cst (unsigned_type_node,
4610 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4612 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4613 if (c)
4615 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4616 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4617 build_int_cst (unsigned_type_node, 2),
4618 build_int_cst (unsigned_type_node, 0));
4619 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4621 if (depend)
4622 depend = OMP_CLAUSE_DECL (depend);
4623 else
4624 depend = build_int_cst (ptr_type_node, 0);
4626 gsi = gsi_last_bb (bb);
4627 t = gimple_omp_task_data_arg (entry_stmt);
4628 if (t == NULL)
4629 t2 = null_pointer_node;
4630 else
4631 t2 = build_fold_addr_expr_loc (loc, t);
4632 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4633 t = gimple_omp_task_copy_fn (entry_stmt);
4634 if (t == NULL)
4635 t3 = null_pointer_node;
4636 else
4637 t3 = build_fold_addr_expr_loc (loc, t);
4639 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4640 8, t1, t2, t3,
4641 gimple_omp_task_arg_size (entry_stmt),
4642 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4643 depend);
4645 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4646 false, GSI_CONTINUE_LINKING);
4650 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4651 catch handler and return it. This prevents programs from violating the
4652 structured block semantics with throws. */
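/* I.e. BODY becomes conceptually

     try { BODY } catch (...) { terminate (); }

   where the handler is the language's protect-cleanup action (e.g.
   std::terminate for C++) or __builtin_trap () as a fallback.  */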
4654 static gimple_seq
4655 maybe_catch_exception (gimple_seq body)
4657 gimple g;
4658 tree decl;
4660 if (!flag_exceptions)
4661 return body;
4663 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4664 decl = lang_hooks.eh_protect_cleanup_actions ();
4665 else
4666 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4668 g = gimple_build_eh_must_not_throw (decl);
4669 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4670 GIMPLE_TRY_CATCH);
4672 return gimple_seq_alloc_with_stmt (g);
4675 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
4677 static tree
4678 vec2chain (vec<tree, va_gc> *v)
4680 tree chain = NULL_TREE, t;
4681 unsigned ix;
4683 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4685 DECL_CHAIN (t) = chain;
4686 chain = t;
4689 return chain;
4693 /* Remove barriers in REGION->EXIT's block. Note that this is only
4694 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4695 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
4696 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
4697 removed. */
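/* For illustration, in

     #pragma omp parallel
     {
       #pragma omp for
	 ...
     }

   the implicit barrier at the end of the for loop is redundant, since
   the join at the end of the parallel region already synchronizes all
   threads; the workshare's GIMPLE_OMP_RETURN can therefore be marked
   nowait (modulo the task caveat handled below).  */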
4699 static void
4700 remove_exit_barrier (struct omp_region *region)
4702 gimple_stmt_iterator gsi;
4703 basic_block exit_bb;
4704 edge_iterator ei;
4705 edge e;
4706 gimple stmt;
4707 int any_addressable_vars = -1;
4709 exit_bb = region->exit;
4711 /* If the parallel region doesn't return, we don't have REGION->EXIT
4712 block at all. */
4713 if (! exit_bb)
4714 return;
4716 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4717 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4718 statements that can appear in between are extremely limited -- no
4719 memory operations at all. Here, we allow nothing at all, so the
4720 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4721 gsi = gsi_last_bb (exit_bb);
4722 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4723 gsi_prev (&gsi);
4724 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4725 return;
4727 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4729 gsi = gsi_last_bb (e->src);
4730 if (gsi_end_p (gsi))
4731 continue;
4732 stmt = gsi_stmt (gsi);
4733 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4734 && !gimple_omp_return_nowait_p (stmt))
4736 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4737 in many cases. If there could be tasks queued, the barrier
4738 might be needed to let the tasks run before some local
4739 variable of the parallel that the task uses as shared
4740 runs out of scope. The task can be spawned either
4741 	     from within the current function (which would be easy to check)
4742 	     or from some function it calls that gets passed the address
4743 of such a variable. */
4744 if (any_addressable_vars < 0)
4746 gomp_parallel *parallel_stmt
4747 = as_a <gomp_parallel *> (last_stmt (region->entry));
4748 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4749 tree local_decls, block, decl;
4750 unsigned ix;
4752 any_addressable_vars = 0;
4753 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4754 if (TREE_ADDRESSABLE (decl))
4756 any_addressable_vars = 1;
4757 break;
4759 for (block = gimple_block (stmt);
4760 !any_addressable_vars
4761 && block
4762 && TREE_CODE (block) == BLOCK;
4763 block = BLOCK_SUPERCONTEXT (block))
4765 for (local_decls = BLOCK_VARS (block);
4766 local_decls;
4767 local_decls = DECL_CHAIN (local_decls))
4768 if (TREE_ADDRESSABLE (local_decls))
4770 any_addressable_vars = 1;
4771 break;
4773 if (block == gimple_block (parallel_stmt))
4774 break;
4777 if (!any_addressable_vars)
4778 gimple_omp_return_set_nowait (stmt);
4783 static void
4784 remove_exit_barriers (struct omp_region *region)
4786 if (region->type == GIMPLE_OMP_PARALLEL)
4787 remove_exit_barrier (region);
4789 if (region->inner)
4791 region = region->inner;
4792 remove_exit_barriers (region);
4793 while (region->next)
4795 region = region->next;
4796 remove_exit_barriers (region);
4801 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4802 calls. These can't be declared as const functions, but
4803 within one parallel body they are constant, so they can be
4804 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4805    which are declared const.  Similarly for a task body, except
4806    that in an untied task omp_get_thread_num () can change at any task
4807 scheduling point. */
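/* For illustration: inside one parallel body

     a = omp_get_thread_num ();
     ...
     b = omp_get_thread_num ();

   both calls can be redirected to the const
   __builtin_omp_get_thread_num (), after which CSE can collapse them
   into a single call.  */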
4809 static void
4810 optimize_omp_library_calls (gimple entry_stmt)
4812 basic_block bb;
4813 gimple_stmt_iterator gsi;
4814 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4815 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4816 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4817 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4818 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4819 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4820 OMP_CLAUSE_UNTIED) != NULL);
4822 FOR_EACH_BB_FN (bb, cfun)
4823 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4825 gimple call = gsi_stmt (gsi);
4826 tree decl;
4828 if (is_gimple_call (call)
4829 && (decl = gimple_call_fndecl (call))
4830 && DECL_EXTERNAL (decl)
4831 && TREE_PUBLIC (decl)
4832 && DECL_INITIAL (decl) == NULL)
4834 tree built_in;
4836 if (DECL_NAME (decl) == thr_num_id)
4838 /* In #pragma omp task untied omp_get_thread_num () can change
4839 during the execution of the task region. */
4840 if (untied_task)
4841 continue;
4842 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4844 else if (DECL_NAME (decl) == num_thr_id)
4845 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4846 else
4847 continue;
4849 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4850 || gimple_call_num_args (call) != 0)
4851 continue;
4853 if (flag_exceptions && !TREE_NOTHROW (decl))
4854 continue;
4856 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4857 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4858 TREE_TYPE (TREE_TYPE (built_in))))
4859 continue;
4861 gimple_call_set_fndecl (call, built_in);
4866 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4867 regimplified. */
4869 static tree
4870 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4872 tree t = *tp;
4874 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4875 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4876 return t;
4878 if (TREE_CODE (t) == ADDR_EXPR)
4879 recompute_tree_invariant_for_addr_expr (t);
4881 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4882 return NULL_TREE;
4885 /* Prepend TO = FROM assignment before *GSI_P. */
4887 static void
4888 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4890 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4891 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4892 true, GSI_SAME_STMT);
4893 gimple stmt = gimple_build_assign (to, from);
4894 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4895 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4896 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4898 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4899 gimple_regimplify_operands (stmt, &gsi);
4903 /* Expand the OpenMP parallel or task directive starting at REGION. */
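/* For illustration: by this point

     #pragma omp parallel
       body;

   has been rewritten so that BODY sits between REGION->ENTRY and
   REGION->EXIT; this function moves that sub-graph into the child
   function and replaces it in the parent with a call launching the
   threads (expand_parallel_call) or the task (expand_task_call).  */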
4905 static void
4906 expand_omp_taskreg (struct omp_region *region)
4908 basic_block entry_bb, exit_bb, new_bb;
4909 struct function *child_cfun;
4910 tree child_fn, block, t;
4911 gimple_stmt_iterator gsi;
4912 gimple entry_stmt, stmt;
4913 edge e;
4914 vec<tree, va_gc> *ws_args;
4916 entry_stmt = last_stmt (region->entry);
4917 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4918 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4920 entry_bb = region->entry;
4921 exit_bb = region->exit;
4923 bool is_cilk_for
4924 = (flag_cilkplus
4925 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
4926 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
4927 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
4929 if (is_cilk_for)
4930 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
4931 and the inner statement contains the name of the built-in function
4932 and grain. */
4933 ws_args = region->inner->ws_args;
4934 else if (is_combined_parallel (region))
4935 ws_args = region->ws_args;
4936 else
4937 ws_args = NULL;
4939 if (child_cfun->cfg)
4941 /* Due to inlining, it may happen that we have already outlined
4942 the region, in which case all we need to do is make the
4943 sub-graph unreachable and emit the parallel call. */
4944 edge entry_succ_e, exit_succ_e;
4946 entry_succ_e = single_succ_edge (entry_bb);
4948 gsi = gsi_last_bb (entry_bb);
4949 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4950 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4951 gsi_remove (&gsi, true);
4953 new_bb = entry_bb;
4954 if (exit_bb)
4956 exit_succ_e = single_succ_edge (exit_bb);
4957 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4959 remove_edge_and_dominated_blocks (entry_succ_e);
4961 else
4963 unsigned srcidx, dstidx, num;
4965 /* If the parallel region needs data sent from the parent
4966 function, then the very first statement (except possible
4967 tree profile counter updates) of the parallel body
4968 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4969 &.OMP_DATA_O is passed as an argument to the child function,
4970 we need to replace it with the argument as seen by the child
4971 function.
4973 In most cases, this will end up being the identity assignment
4974 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4975 a function call that has been inlined, the original PARM_DECL
4976 .OMP_DATA_I may have been converted into a different local
4977 	 variable, in which case we need to keep the assignment.  */
4978 if (gimple_omp_taskreg_data_arg (entry_stmt))
4980 basic_block entry_succ_bb = single_succ (entry_bb);
4981 tree arg, narg;
4982 gimple parcopy_stmt = NULL;
4984 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4986 gimple stmt;
4988 gcc_assert (!gsi_end_p (gsi));
4989 stmt = gsi_stmt (gsi);
4990 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4991 continue;
4993 if (gimple_num_ops (stmt) == 2)
4995 tree arg = gimple_assign_rhs1 (stmt);
4997 	      /* We're ignoring the subcode because we're
4998 effectively doing a STRIP_NOPS. */
5000 if (TREE_CODE (arg) == ADDR_EXPR
5001 && TREE_OPERAND (arg, 0)
5002 == gimple_omp_taskreg_data_arg (entry_stmt))
5004 parcopy_stmt = stmt;
5005 break;
5010 gcc_assert (parcopy_stmt != NULL);
5011 arg = DECL_ARGUMENTS (child_fn);
5013 if (!gimple_in_ssa_p (cfun))
5015 if (gimple_assign_lhs (parcopy_stmt) == arg)
5016 gsi_remove (&gsi, true);
5017 else
5019 /* ?? Is setting the subcode really necessary ?? */
5020 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
5021 gimple_assign_set_rhs1 (parcopy_stmt, arg);
5024 else
5026 /* If we are in ssa form, we must load the value from the default
5027 definition of the argument. That should not be defined now,
5028 since the argument is not used uninitialized. */
5029 gcc_assert (ssa_default_def (cfun, arg) == NULL);
5030 narg = make_ssa_name (arg, gimple_build_nop ());
5031 set_ssa_default_def (cfun, arg, narg);
5032 /* ?? Is setting the subcode really necessary ?? */
5033 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
5034 gimple_assign_set_rhs1 (parcopy_stmt, narg);
5035 update_stmt (parcopy_stmt);
5039 /* Declare local variables needed in CHILD_CFUN. */
5040 block = DECL_INITIAL (child_fn);
5041 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
5042 /* The gimplifier could record temporaries in parallel/task block
5043 	 rather than in the containing function's local_decls chain,
5044 which would mean cgraph missed finalizing them. Do it now. */
5045 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
5046 if (TREE_CODE (t) == VAR_DECL
5047 && TREE_STATIC (t)
5048 && !DECL_EXTERNAL (t))
5049 varpool_node::finalize_decl (t);
5050 DECL_SAVED_TREE (child_fn) = NULL;
5051 /* We'll create a CFG for child_fn, so no gimple body is needed. */
5052 gimple_set_body (child_fn, NULL);
5053 TREE_USED (block) = 1;
5055 /* Reset DECL_CONTEXT on function arguments. */
5056 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
5057 DECL_CONTEXT (t) = child_fn;
5059 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
5060 so that it can be moved to the child function. */
5061 gsi = gsi_last_bb (entry_bb);
5062 stmt = gsi_stmt (gsi);
5063 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
5064 || gimple_code (stmt) == GIMPLE_OMP_TASK));
5065 gsi_remove (&gsi, true);
5066 e = split_block (entry_bb, stmt);
5067 entry_bb = e->dest;
5068 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5070 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5071 if (exit_bb)
5073 gsi = gsi_last_bb (exit_bb);
5074 gcc_assert (!gsi_end_p (gsi)
5075 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5076 stmt = gimple_build_return (NULL);
5077 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5078 gsi_remove (&gsi, true);
5081 /* Move the parallel region into CHILD_CFUN. */
5083 if (gimple_in_ssa_p (cfun))
5085 init_tree_ssa (child_cfun);
5086 init_ssa_operands (child_cfun);
5087 child_cfun->gimple_df->in_ssa_p = true;
5088 block = NULL_TREE;
5090 else
5091 block = gimple_block (entry_stmt);
5093 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
5094 if (exit_bb)
5095 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
5096 /* When the OMP expansion process cannot guarantee an up-to-date
5097 	 loop tree, arrange for the child function to fix up loops.  */
5098 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5099 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
5101 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5102 num = vec_safe_length (child_cfun->local_decls);
5103 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
5105 t = (*child_cfun->local_decls)[srcidx];
5106 if (DECL_CONTEXT (t) == cfun->decl)
5107 continue;
5108 if (srcidx != dstidx)
5109 (*child_cfun->local_decls)[dstidx] = t;
5110 dstidx++;
5112 if (dstidx != num)
5113 vec_safe_truncate (child_cfun->local_decls, dstidx);
5115 /* Inform the callgraph about the new function. */
5116 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
5117 cgraph_node::add_new_function (child_fn, true);
5119 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5120 fixed in a following pass. */
5121 push_cfun (child_cfun);
5122 if (optimize)
5123 optimize_omp_library_calls (entry_stmt);
5124 cgraph_edge::rebuild_edges ();
5126 /* Some EH regions might become dead, see PR34608. If
5127 pass_cleanup_cfg isn't the first pass to happen with the
5128 new child, these dead EH edges might cause problems.
5129 Clean them up now. */
5130 if (flag_exceptions)
5132 basic_block bb;
5133 bool changed = false;
5135 FOR_EACH_BB_FN (bb, cfun)
5136 changed |= gimple_purge_dead_eh_edges (bb);
5137 if (changed)
5138 cleanup_tree_cfg ();
5140 if (gimple_in_ssa_p (cfun))
5141 update_ssa (TODO_update_ssa);
5142 pop_cfun ();
5145 /* Emit a library call to launch the children threads. */
5146 if (is_cilk_for)
5147 expand_cilk_for_call (new_bb,
5148 as_a <gomp_parallel *> (entry_stmt), ws_args);
5149 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5150 expand_parallel_call (region, new_bb,
5151 as_a <gomp_parallel *> (entry_stmt), ws_args);
5152 else
5153 expand_task_call (new_bb, as_a <gomp_task *> (entry_stmt));
5154 if (gimple_in_ssa_p (cfun))
5155 update_ssa (TODO_update_ssa_only_virtuals);
5159 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5160 of the combined collapse > 1 loop constructs, generate code like:
5161 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5162 if (cond3 is <)
5163 adj = STEP3 - 1;
5164 else
5165 adj = STEP3 + 1;
5166 count3 = (adj + N32 - N31) / STEP3;
5167 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5168 if (cond2 is <)
5169 adj = STEP2 - 1;
5170 else
5171 adj = STEP2 + 1;
5172 count2 = (adj + N22 - N21) / STEP2;
5173 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5174 if (cond1 is <)
5175 adj = STEP1 - 1;
5176 else
5177 adj = STEP1 + 1;
5178 count1 = (adj + N12 - N11) / STEP1;
5179 count = count1 * count2 * count3;
5180 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5181 count = 0;
5182 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5183 of the combined loop constructs, just initialize COUNTS array
5184 from the _looptemp_ clauses. */
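/* For a concrete example, with collapse(2) and

     for (i = 0; i < 4; i++)
       for (j = 7; j > 1; j -= 2)

   the code above computes count1 == 4, count2 == 3 (adj == STEP2 + 1
   == -1, so (-1 + 1 - 7) / -2 == 3) and count == 12.  */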
5186 /* NOTE: It *could* be better to merge all of the BBs together,
5187 creating one larger BB with all the computation and the unexpected
5188 jump at the end. I.e.
5190 bool zero3, zero2, zero1, zero;
5192 zero3 = N32 c3 N31;
5193 count3 = (N32 - N31) /[cl] STEP3;
5194 zero2 = N22 c2 N21;
5195 count2 = (N22 - N21) /[cl] STEP2;
5196 zero1 = N12 c1 N11;
5197 count1 = (N12 - N11) /[cl] STEP1;
5198 zero = zero3 || zero2 || zero1;
5199 count = count1 * count2 * count3;
5200 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5202    After all, we expect zero to be false, and thus we expect to have to
5203 evaluate all of the comparison expressions, so short-circuiting
5204 oughtn't be a win. Since the condition isn't protecting a
5205 denominator, we're not concerned about divide-by-zero, so we can
5206 fully evaluate count even if a numerator turned out to be wrong.
5208 It seems like putting this all together would create much better
5209 scheduling opportunities, and less pressure on the chip's branch
5210 predictor. */
5212 static void
5213 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5214 basic_block &entry_bb, tree *counts,
5215 basic_block &zero_iter_bb, int &first_zero_iter,
5216 basic_block &l2_dom_bb)
5218 tree t, type = TREE_TYPE (fd->loop.v);
5219 edge e, ne;
5220 int i;
5222 /* Collapsed loops need work for expansion into SSA form. */
5223 gcc_assert (!gimple_in_ssa_p (cfun));
5225 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5226 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5228 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5229 isn't supposed to be handled, as the inner loop doesn't
5230 use it. */
5231 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5232 OMP_CLAUSE__LOOPTEMP_);
5233 gcc_assert (innerc);
5234 for (i = 0; i < fd->collapse; i++)
5236 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5237 OMP_CLAUSE__LOOPTEMP_);
5238 gcc_assert (innerc);
5239 if (i)
5240 counts[i] = OMP_CLAUSE_DECL (innerc);
5241 else
5242 counts[0] = NULL_TREE;
5244 return;
5247 for (i = 0; i < fd->collapse; i++)
5249 tree itype = TREE_TYPE (fd->loops[i].v);
5251 if (SSA_VAR_P (fd->loop.n2)
5252 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5253 fold_convert (itype, fd->loops[i].n1),
5254 fold_convert (itype, fd->loops[i].n2)))
5255 == NULL_TREE || !integer_onep (t)))
5257 gcond *cond_stmt;
5258 tree n1, n2;
5259 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5260 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5261 true, GSI_SAME_STMT);
5262 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5263 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5264 true, GSI_SAME_STMT);
5265 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5266 NULL_TREE, NULL_TREE);
5267 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
5268 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
5269 expand_omp_regimplify_p, NULL, NULL)
5270 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
5271 expand_omp_regimplify_p, NULL, NULL))
5273 *gsi = gsi_for_stmt (cond_stmt);
5274 gimple_regimplify_operands (cond_stmt, gsi);
5276 e = split_block (entry_bb, cond_stmt);
5277 if (zero_iter_bb == NULL)
5279 gassign *assign_stmt;
5280 first_zero_iter = i;
5281 zero_iter_bb = create_empty_bb (entry_bb);
5282 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5283 *gsi = gsi_after_labels (zero_iter_bb);
5284 assign_stmt = gimple_build_assign (fd->loop.n2,
5285 build_zero_cst (type));
5286 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
5287 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5288 entry_bb);
5290 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5291 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5292 e->flags = EDGE_TRUE_VALUE;
5293 e->probability = REG_BR_PROB_BASE - ne->probability;
5294 if (l2_dom_bb == NULL)
5295 l2_dom_bb = entry_bb;
5296 entry_bb = e->dest;
5297 *gsi = gsi_last_bb (entry_bb);
5300 if (POINTER_TYPE_P (itype))
5301 itype = signed_type_for (itype);
5302 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5303 ? -1 : 1));
5304 t = fold_build2 (PLUS_EXPR, itype,
5305 fold_convert (itype, fd->loops[i].step), t);
5306 t = fold_build2 (PLUS_EXPR, itype, t,
5307 fold_convert (itype, fd->loops[i].n2));
5308 t = fold_build2 (MINUS_EXPR, itype, t,
5309 fold_convert (itype, fd->loops[i].n1));
5310 /* ?? We could probably use CEIL_DIV_EXPR instead of
5311 	 TRUNC_DIV_EXPR with the by-hand adjustment.  Unless we can't
5312 generate the same code in the end because generically we
5313 don't know that the values involved must be negative for
5314 GT?? */
5315 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5316 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5317 fold_build1 (NEGATE_EXPR, itype, t),
5318 fold_build1 (NEGATE_EXPR, itype,
5319 fold_convert (itype,
5320 fd->loops[i].step)));
5321 else
5322 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5323 fold_convert (itype, fd->loops[i].step));
5324 t = fold_convert (type, t);
5325 if (TREE_CODE (t) == INTEGER_CST)
5326 counts[i] = t;
5327 else
5329 counts[i] = create_tmp_reg (type, ".count");
5330 expand_omp_build_assign (gsi, counts[i], t);
5332 if (SSA_VAR_P (fd->loop.n2))
5334 if (i == 0)
5335 t = counts[0];
5336 else
5337 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5338 expand_omp_build_assign (gsi, fd->loop.n2, t);
5344 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5345 T = V;
5346 V3 = N31 + (T % count3) * STEP3;
5347 T = T / count3;
5348 V2 = N21 + (T % count2) * STEP2;
5349 T = T / count2;
5350 V1 = N11 + T * STEP1;
5351 if this loop doesn't have an inner loop construct combined with it.
5352 If it does have an inner loop construct combined with it and the
5353 iteration count isn't known constant, store values from counts array
5354 into its _looptemp_ temporaries instead. */
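/* For a concrete example, with collapse(2), count2 == 10 and logical
   iteration number T == 23, the code below recovers

     V2 = N21 + (23 % 10) * STEP2 = N21 + 3 * STEP2;
     V1 = N11 + (23 / 10) * STEP1 = N11 + 2 * STEP1;  */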
5356 static void
5357 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5358 tree *counts, gimple inner_stmt, tree startvar)
5360 int i;
5361 if (gimple_omp_for_combined_p (fd->for_stmt))
5363 /* If fd->loop.n2 is constant, then no propagation of the counts
5364 	 is needed; they are constant.  */
5365 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5366 return;
5368 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5369 ? gimple_omp_parallel_clauses (inner_stmt)
5370 : gimple_omp_for_clauses (inner_stmt);
5371 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5372 isn't supposed to be handled, as the inner loop doesn't
5373 use it. */
5374 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5375 gcc_assert (innerc);
5376 for (i = 0; i < fd->collapse; i++)
5378 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5379 OMP_CLAUSE__LOOPTEMP_);
5380 gcc_assert (innerc);
5381 if (i)
5383 tree tem = OMP_CLAUSE_DECL (innerc);
5384 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5385 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5386 false, GSI_CONTINUE_LINKING);
5387 gassign *stmt = gimple_build_assign (tem, t);
5388 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5391 return;
5394 tree type = TREE_TYPE (fd->loop.v);
5395 tree tem = create_tmp_reg (type, ".tem");
5396 gassign *stmt = gimple_build_assign (tem, startvar);
5397 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5399 for (i = fd->collapse - 1; i >= 0; i--)
5401 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5402 itype = vtype;
5403 if (POINTER_TYPE_P (vtype))
5404 itype = signed_type_for (vtype);
5405 if (i != 0)
5406 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5407 else
5408 t = tem;
5409 t = fold_convert (itype, t);
5410 t = fold_build2 (MULT_EXPR, itype, t,
5411 fold_convert (itype, fd->loops[i].step));
5412 if (POINTER_TYPE_P (vtype))
5413 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5414 else
5415 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5416 t = force_gimple_operand_gsi (gsi, t,
5417 DECL_P (fd->loops[i].v)
5418 && TREE_ADDRESSABLE (fd->loops[i].v),
5419 NULL_TREE, false,
5420 GSI_CONTINUE_LINKING);
5421 stmt = gimple_build_assign (fd->loops[i].v, t);
5422 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5423 if (i != 0)
5425 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5426 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5427 false, GSI_CONTINUE_LINKING);
5428 stmt = gimple_build_assign (tem, t);
5429 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5435 /* Helper function for expand_omp_for_*. Generate code like:
5436 L10:
5437 V3 += STEP3;
5438 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5439 L11:
5440 V3 = N31;
5441 V2 += STEP2;
5442 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5443 L12:
5444 V2 = N21;
5445 V1 += STEP1;
5446 goto BODY_BB; */
5448 static basic_block
5449 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5450 basic_block body_bb)
5452 basic_block last_bb, bb, collapse_bb = NULL;
5453 int i;
5454 gimple_stmt_iterator gsi;
5455 edge e;
5456 tree t;
5457 gimple stmt;
5459 last_bb = cont_bb;
5460 for (i = fd->collapse - 1; i >= 0; i--)
5462 tree vtype = TREE_TYPE (fd->loops[i].v);
5464 bb = create_empty_bb (last_bb);
5465 add_bb_to_loop (bb, last_bb->loop_father);
5466 gsi = gsi_start_bb (bb);
5468 if (i < fd->collapse - 1)
5470 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5471 e->probability = REG_BR_PROB_BASE / 8;
5473 t = fd->loops[i + 1].n1;
5474 t = force_gimple_operand_gsi (&gsi, t,
5475 DECL_P (fd->loops[i + 1].v)
5476 && TREE_ADDRESSABLE (fd->loops[i
5477 + 1].v),
5478 NULL_TREE, false,
5479 GSI_CONTINUE_LINKING);
5480 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5481 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5483 else
5484 collapse_bb = bb;
5486 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5488 if (POINTER_TYPE_P (vtype))
5489 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5490 else
5491 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5492 t = force_gimple_operand_gsi (&gsi, t,
5493 DECL_P (fd->loops[i].v)
5494 && TREE_ADDRESSABLE (fd->loops[i].v),
5495 NULL_TREE, false, GSI_CONTINUE_LINKING);
5496 stmt = gimple_build_assign (fd->loops[i].v, t);
5497 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5499 if (i > 0)
5501 t = fd->loops[i].n2;
5502 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5503 false, GSI_CONTINUE_LINKING);
5504 tree v = fd->loops[i].v;
5505 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5506 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5507 false, GSI_CONTINUE_LINKING);
5508 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5509 stmt = gimple_build_cond_empty (t);
5510 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5511 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5512 e->probability = REG_BR_PROB_BASE * 7 / 8;
5514 else
5515 make_edge (bb, body_bb, EDGE_FALLTHRU);
5516 last_bb = bb;
5519 return collapse_bb;
5523 /* A subroutine of expand_omp_for. Generate code for a parallel
5524 loop with any schedule. Given parameters:
5526 for (V = N1; V cond N2; V += STEP) BODY;
5528 where COND is "<" or ">", we generate pseudocode
5530 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5531 if (more) goto L0; else goto L3;
5533 V = istart0;
5534 iend = iend0;
5536 BODY;
5537 V += STEP;
5538 if (V cond iend) goto L1; else goto L2;
5540 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5543 If this is a combined omp parallel loop, instead of the call to
5544 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5545 If this is gimple_omp_for_combined_p loop, then instead of assigning
5546 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5547 inner GIMPLE_OMP_FOR and V += STEP; and
5548 if (V cond iend) goto L1; else goto L2; are removed.
5550 For collapsed loops, given parameters:
5551 collapse(3)
5552 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5553 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5554 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5555 BODY;
5557 we generate pseudocode
5559 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5560 if (cond3 is <)
5561 adj = STEP3 - 1;
5562 else
5563 adj = STEP3 + 1;
5564 count3 = (adj + N32 - N31) / STEP3;
5565 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5566 if (cond2 is <)
5567 adj = STEP2 - 1;
5568 else
5569 adj = STEP2 + 1;
5570 count2 = (adj + N22 - N21) / STEP2;
5571 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5572 if (cond1 is <)
5573 adj = STEP1 - 1;
5574 else
5575 adj = STEP1 + 1;
5576 count1 = (adj + N12 - N11) / STEP1;
5577 count = count1 * count2 * count3;
5578 goto Z1;
5580 count = 0;
5582 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5583 if (more) goto L0; else goto L3;
5585 V = istart0;
5586 T = V;
5587 V3 = N31 + (T % count3) * STEP3;
5588 T = T / count3;
5589 V2 = N21 + (T % count2) * STEP2;
5590 T = T / count2;
5591 V1 = N11 + T * STEP1;
5592 iend = iend0;
5594 BODY;
5595 V += 1;
5596 if (V < iend) goto L10; else goto L2;
5597 L10:
5598 V3 += STEP3;
5599 if (V3 cond3 N32) goto L1; else goto L11;
5600 L11:
5601 V3 = N31;
5602 V2 += STEP2;
5603 if (V2 cond2 N22) goto L1; else goto L12;
5604 L12:
5605 V2 = N21;
5606 V1 += STEP1;
5607 goto L1;
5609 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5614 static void
5615 expand_omp_for_generic (struct omp_region *region,
5616 struct omp_for_data *fd,
5617 enum built_in_function start_fn,
5618 enum built_in_function next_fn,
5619 gimple inner_stmt)
5621 tree type, istart0, iend0, iend;
5622 tree t, vmain, vback, bias = NULL_TREE;
5623 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5624 basic_block l2_bb = NULL, l3_bb = NULL;
5625 gimple_stmt_iterator gsi;
5626 gassign *assign_stmt;
5627 bool in_combined_parallel = is_combined_parallel (region);
5628 bool broken_loop = region->cont == NULL;
5629 edge e, ne;
5630 tree *counts = NULL;
5631 int i;
5633 gcc_assert (!broken_loop || !in_combined_parallel);
5634 gcc_assert (fd->iter_type == long_integer_type_node
5635 || !in_combined_parallel);
5637 type = TREE_TYPE (fd->loop.v);
5638 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5639 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5640 TREE_ADDRESSABLE (istart0) = 1;
5641 TREE_ADDRESSABLE (iend0) = 1;
5643   /* See if we need to bias by LLONG_MIN: the GOMP_loop_ull_* runtime
	 API iterates over unsigned values, so a signed range that may
	 cross zero is shifted by TYPE_MIN_VALUE to keep its ordering
	 monotone in the unsigned domain.  */
5644 if (fd->iter_type == long_long_unsigned_type_node
5645 && TREE_CODE (type) == INTEGER_TYPE
5646 && !TYPE_UNSIGNED (type))
5648 tree n1, n2;
5650 if (fd->loop.cond_code == LT_EXPR)
5652 n1 = fd->loop.n1;
5653 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5655 else
5657 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5658 n2 = fd->loop.n1;
5660 if (TREE_CODE (n1) != INTEGER_CST
5661 || TREE_CODE (n2) != INTEGER_CST
5662 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5663 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5666 entry_bb = region->entry;
5667 cont_bb = region->cont;
5668 collapse_bb = NULL;
5669 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5670 gcc_assert (broken_loop
5671 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5672 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5673 l1_bb = single_succ (l0_bb);
5674 if (!broken_loop)
5676 l2_bb = create_empty_bb (cont_bb);
5677 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5678 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5680 else
5681 l2_bb = NULL;
5682 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5683 exit_bb = region->exit;
5685 gsi = gsi_last_bb (entry_bb);
5687 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5688 if (fd->collapse > 1)
5690 int first_zero_iter = -1;
5691 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5693 counts = XALLOCAVEC (tree, fd->collapse);
5694 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5695 zero_iter_bb, first_zero_iter,
5696 l2_dom_bb);
5698 if (zero_iter_bb)
5700 /* Some counts[i] vars might be uninitialized if
5701 some loop has zero iterations. But the body shouldn't
5702 be executed in that case, so just avoid uninit warnings. */
5703 for (i = first_zero_iter; i < fd->collapse; i++)
5704 if (SSA_VAR_P (counts[i]))
5705 TREE_NO_WARNING (counts[i]) = 1;
5706 gsi_prev (&gsi);
5707 e = split_block (entry_bb, gsi_stmt (gsi));
5708 entry_bb = e->dest;
5709 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5710 gsi = gsi_last_bb (entry_bb);
5711 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5712 get_immediate_dominator (CDI_DOMINATORS,
5713 zero_iter_bb));
5716 if (in_combined_parallel)
5718 /* In a combined parallel loop, emit a call to
5719 GOMP_loop_foo_next. */
5720 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5721 build_fold_addr_expr (istart0),
5722 build_fold_addr_expr (iend0));
5724 else
5726 tree t0, t1, t2, t3, t4;
5727 /* If this is not a combined parallel loop, emit a call to
5728 GOMP_loop_foo_start in ENTRY_BB. */
5729 t4 = build_fold_addr_expr (iend0);
5730 t3 = build_fold_addr_expr (istart0);
5731 t2 = fold_convert (fd->iter_type, fd->loop.step);
5732 t1 = fd->loop.n2;
5733 t0 = fd->loop.n1;
5734 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5736 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5737 OMP_CLAUSE__LOOPTEMP_);
5738 gcc_assert (innerc);
5739 t0 = OMP_CLAUSE_DECL (innerc);
5740 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5741 OMP_CLAUSE__LOOPTEMP_);
5742 gcc_assert (innerc);
5743 t1 = OMP_CLAUSE_DECL (innerc);
5745 if (POINTER_TYPE_P (TREE_TYPE (t0))
5746 && TYPE_PRECISION (TREE_TYPE (t0))
5747 != TYPE_PRECISION (fd->iter_type))
5749 /* Avoid casting pointers to integer of a different size. */
5750 tree itype = signed_type_for (type);
5751 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5752 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5754 else
5756 t1 = fold_convert (fd->iter_type, t1);
5757 t0 = fold_convert (fd->iter_type, t0);
5759 if (bias)
5761 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5762 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5764 if (fd->iter_type == long_integer_type_node)
5766 if (fd->chunk_size)
5768 t = fold_convert (fd->iter_type, fd->chunk_size);
5769 t = build_call_expr (builtin_decl_explicit (start_fn),
5770 6, t0, t1, t2, t, t3, t4);
5772 else
5773 t = build_call_expr (builtin_decl_explicit (start_fn),
5774 5, t0, t1, t2, t3, t4);
5776 else
5778 tree t5;
5779 tree c_bool_type;
5780 tree bfn_decl;
5782 /* The GOMP_loop_ull_*start functions have an additional boolean
5783 argument, true for < loops and false for > loops.
5784 In Fortran, the C bool type can be different from
5785 boolean_type_node. */
5786 bfn_decl = builtin_decl_explicit (start_fn);
5787 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5788 t5 = build_int_cst (c_bool_type,
5789 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5790 if (fd->chunk_size)
5792 tree bfn_decl = builtin_decl_explicit (start_fn);
5793 t = fold_convert (fd->iter_type, fd->chunk_size);
5794 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5796 else
5797 t = build_call_expr (builtin_decl_explicit (start_fn),
5798 6, t5, t0, t1, t2, t3, t4);
5801 if (TREE_TYPE (t) != boolean_type_node)
5802 t = fold_build2 (NE_EXPR, boolean_type_node,
5803 t, build_int_cst (TREE_TYPE (t), 0));
5804 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5805 true, GSI_SAME_STMT);
5806 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5808 /* Remove the GIMPLE_OMP_FOR statement. */
5809 gsi_remove (&gsi, true);
5811 /* Iteration setup for sequential loop goes in L0_BB. */
5812 tree startvar = fd->loop.v;
5813 tree endvar = NULL_TREE;
5815 if (gimple_omp_for_combined_p (fd->for_stmt))
5817 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5818 && gimple_omp_for_kind (inner_stmt)
5819 == GF_OMP_FOR_KIND_SIMD);
5820 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5821 OMP_CLAUSE__LOOPTEMP_);
5822 gcc_assert (innerc);
5823 startvar = OMP_CLAUSE_DECL (innerc);
5824 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5825 OMP_CLAUSE__LOOPTEMP_);
5826 gcc_assert (innerc);
5827 endvar = OMP_CLAUSE_DECL (innerc);
5830 gsi = gsi_start_bb (l0_bb);
5831 t = istart0;
5832 if (bias)
5833 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5834 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5835 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5836 t = fold_convert (TREE_TYPE (startvar), t);
5837 t = force_gimple_operand_gsi (&gsi, t,
5838 DECL_P (startvar)
5839 && TREE_ADDRESSABLE (startvar),
5840 NULL_TREE, false, GSI_CONTINUE_LINKING);
5841 assign_stmt = gimple_build_assign (startvar, t);
5842 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5844 t = iend0;
5845 if (bias)
5846 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5847 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5848 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5849 t = fold_convert (TREE_TYPE (startvar), t);
5850 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5851 false, GSI_CONTINUE_LINKING);
5852 if (endvar)
5854 assign_stmt = gimple_build_assign (endvar, iend);
5855 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5856 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5857 assign_stmt = gimple_build_assign (fd->loop.v, iend);
5858 else
5859 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
5860 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5862 if (fd->collapse > 1)
5863 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5865 if (!broken_loop)
5867 /* Code to control the increment and predicate for the sequential
5868 loop goes in the CONT_BB. */
5869 gsi = gsi_last_bb (cont_bb);
5870 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
5871 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
5872 vmain = gimple_omp_continue_control_use (cont_stmt);
5873 vback = gimple_omp_continue_control_def (cont_stmt);
5875 if (!gimple_omp_for_combined_p (fd->for_stmt))
5877 if (POINTER_TYPE_P (type))
5878 t = fold_build_pointer_plus (vmain, fd->loop.step);
5879 else
5880 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5881 t = force_gimple_operand_gsi (&gsi, t,
5882 DECL_P (vback)
5883 && TREE_ADDRESSABLE (vback),
5884 NULL_TREE, true, GSI_SAME_STMT);
5885 assign_stmt = gimple_build_assign (vback, t);
5886 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
5888 t = build2 (fd->loop.cond_code, boolean_type_node,
5889 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5890 iend);
5891 gcond *cond_stmt = gimple_build_cond_empty (t);
5892 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
5895 /* Remove GIMPLE_OMP_CONTINUE. */
5896 gsi_remove (&gsi, true);
5898 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5899 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5901 /* Emit code to get the next parallel iteration in L2_BB. */
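/* Each GOMP_loop_*_next call stores the bounds of the next chunk in
   *istart0/*iend0 and returns false once the iteration space is
   exhausted, at which point control falls through to L3_BB.  */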
5902 gsi = gsi_start_bb (l2_bb);
5904 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5905 build_fold_addr_expr (istart0),
5906 build_fold_addr_expr (iend0));
5907 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5908 false, GSI_CONTINUE_LINKING);
5909 if (TREE_TYPE (t) != boolean_type_node)
5910 t = fold_build2 (NE_EXPR, boolean_type_node,
5911 t, build_int_cst (TREE_TYPE (t), 0));
5912 gcond *cond_stmt = gimple_build_cond_empty (t);
5913 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
5916 /* Add the loop cleanup function. */
5917 gsi = gsi_last_bb (exit_bb);
5918 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5919 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5920 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5921 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5922 else
5923 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5924 gcall *call_stmt = gimple_build_call (t, 0);
5925 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5926 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5927 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
5928 gsi_remove (&gsi, true);
5930 /* Connect the new blocks. */
5931 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5932 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5934 if (!broken_loop)
5936 gimple_seq phis;
5938 e = find_edge (cont_bb, l3_bb);
5939 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5941 phis = phi_nodes (l3_bb);
5942 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5944 gimple phi = gsi_stmt (gsi);
5945 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5946 PHI_ARG_DEF_FROM_EDGE (phi, e));
5948 remove_edge (e);
5950 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5951 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5952 e = find_edge (cont_bb, l1_bb);
5953 if (gimple_omp_for_combined_p (fd->for_stmt))
5955 remove_edge (e);
5956 e = NULL;
5958 else if (fd->collapse > 1)
5960 remove_edge (e);
5961 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5963 else
5964 e->flags = EDGE_TRUE_VALUE;
5965 if (e)
5967 e->probability = REG_BR_PROB_BASE * 7 / 8;
5968 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5970 else
5972 e = find_edge (cont_bb, l2_bb);
5973 e->flags = EDGE_FALLTHRU;
5975 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5977 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5978 recompute_dominator (CDI_DOMINATORS, l2_bb));
5979 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5980 recompute_dominator (CDI_DOMINATORS, l3_bb));
5981 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5982 recompute_dominator (CDI_DOMINATORS, l0_bb));
5983 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5984 recompute_dominator (CDI_DOMINATORS, l1_bb));
5986 struct loop *outer_loop = alloc_loop ();
5987 outer_loop->header = l0_bb;
5988 outer_loop->latch = l2_bb;
5989 add_loop (outer_loop, l0_bb->loop_father);
5991 if (!gimple_omp_for_combined_p (fd->for_stmt))
5993 struct loop *loop = alloc_loop ();
5994 loop->header = l1_bb;
5995 /* The loop may have multiple latches. */
5996 add_loop (loop, outer_loop);
6002 /* A subroutine of expand_omp_for. Generate code for a parallel
6003 loop with static schedule and no specified chunk size. Given
6004 parameters:
6006 for (V = N1; V cond N2; V += STEP) BODY;
6008 where COND is "<" or ">", we generate pseudocode
6010 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6011 if (cond is <)
6012 adj = STEP - 1;
6013 else
6014 adj = STEP + 1;
6015 if ((__typeof (V)) -1 > 0 && cond is >)
6016 n = -(adj + N2 - N1) / -STEP;
6017 else
6018 n = (adj + N2 - N1) / STEP;
6019 q = n / nthreads;
6020 tt = n % nthreads;
6021 if (threadid < tt) goto L3; else goto L4;
6022 L3:
6023 tt = 0;
6024 q = q + 1;
6025 L4:
6026 s0 = q * threadid + tt;
6027 e0 = s0 + q;
6028 V = s0 * STEP + N1;
6029 if (s0 >= e0) goto L2; else goto L0;
6030 L0:
6031 e = e0 * STEP + N1;
6032 L1:
6033 BODY;
6034 V += STEP;
6035 if (V cond e) goto L1;
6036 L2:
6037 */
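/* A worked example (illustrative numbers only): n = 10 iterations on
   nthreads = 4 gives q = 2, tt = 2.  Threads 0 and 1 satisfy
   threadid < tt, so they take q + 1 = 3 iterations each ([0,3) and
   [3,6)); threads 2 and 3 take 2 each ([6,8) and [8,10)).  All ten
   iterations are covered and the imbalance is at most one.  */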
6039 static void
6040 expand_omp_for_static_nochunk (struct omp_region *region,
6041 struct omp_for_data *fd,
6042 gimple inner_stmt)
6044 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
6045 tree type, itype, vmain, vback;
6046 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
6047 basic_block body_bb, cont_bb, collapse_bb = NULL;
6048 basic_block fin_bb;
6049 gimple_stmt_iterator gsi;
6050 edge ep;
6051 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6052 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6053 bool broken_loop = region->cont == NULL;
6054 tree *counts = NULL;
6055 tree n1, n2, step;
6057 itype = type = TREE_TYPE (fd->loop.v);
6058 if (POINTER_TYPE_P (type))
6059 itype = signed_type_for (type);
6061 entry_bb = region->entry;
6062 cont_bb = region->cont;
6063 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6064 fin_bb = BRANCH_EDGE (entry_bb)->dest;
6065 gcc_assert (broken_loop
6066 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
6067 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6068 body_bb = single_succ (seq_start_bb);
6069 if (!broken_loop)
6071 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6072 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6074 exit_bb = region->exit;
6076 /* Iteration space partitioning goes in ENTRY_BB. */
6077 gsi = gsi_last_bb (entry_bb);
6078 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6080 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6082 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6083 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6086 if (fd->collapse > 1)
6088 int first_zero_iter = -1;
6089 basic_block l2_dom_bb = NULL;
6091 counts = XALLOCAVEC (tree, fd->collapse);
6092 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6093 fin_bb, first_zero_iter,
6094 l2_dom_bb);
6095 t = NULL_TREE;
6097 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6098 t = integer_one_node;
6099 else
6100 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6101 fold_convert (type, fd->loop.n1),
6102 fold_convert (type, fd->loop.n2));
6103 if (fd->collapse == 1
6104 && TYPE_UNSIGNED (type)
6105 && (t == NULL_TREE || !integer_onep (t)))
6107 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6108 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6109 true, GSI_SAME_STMT);
6110 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6111 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6112 true, GSI_SAME_STMT);
6113 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6114 NULL_TREE, NULL_TREE);
6115 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6116 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6117 expand_omp_regimplify_p, NULL, NULL)
6118 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6119 expand_omp_regimplify_p, NULL, NULL))
6121 gsi = gsi_for_stmt (cond_stmt);
6122 gimple_regimplify_operands (cond_stmt, &gsi);
6124 ep = split_block (entry_bb, cond_stmt);
6125 ep->flags = EDGE_TRUE_VALUE;
6126 entry_bb = ep->dest;
6127 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6128 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
6129 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
6130 if (gimple_in_ssa_p (cfun))
6132 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6133 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
6134 !gsi_end_p (gpi); gsi_next (&gpi))
6136 gphi *phi = gpi.phi ();
6137 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6138 ep, UNKNOWN_LOCATION);
6141 gsi = gsi_last_bb (entry_bb);
6144 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6145 t = fold_convert (itype, t);
6146 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6147 true, GSI_SAME_STMT);
6149 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6150 t = fold_convert (itype, t);
6151 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6152 true, GSI_SAME_STMT);
6154 n1 = fd->loop.n1;
6155 n2 = fd->loop.n2;
6156 step = fd->loop.step;
6157 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6159 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6160 OMP_CLAUSE__LOOPTEMP_);
6161 gcc_assert (innerc);
6162 n1 = OMP_CLAUSE_DECL (innerc);
6163 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6164 OMP_CLAUSE__LOOPTEMP_);
6165 gcc_assert (innerc);
6166 n2 = OMP_CLAUSE_DECL (innerc);
6168 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6169 true, NULL_TREE, true, GSI_SAME_STMT);
6170 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6171 true, NULL_TREE, true, GSI_SAME_STMT);
6172 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6173 true, NULL_TREE, true, GSI_SAME_STMT);
6175 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6176 t = fold_build2 (PLUS_EXPR, itype, step, t);
6177 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6178 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6179 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6180 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6181 fold_build1 (NEGATE_EXPR, itype, t),
6182 fold_build1 (NEGATE_EXPR, itype, step));
6183 else
6184 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6185 t = fold_convert (itype, t);
6186 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6188 q = create_tmp_reg (itype, "q");
6189 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
6190 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6191 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
6193 tt = create_tmp_reg (itype, "tt");
6194 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
6195 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6196 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
6198 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
6199 gcond *cond_stmt = gimple_build_cond_empty (t);
6200 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6202 second_bb = split_block (entry_bb, cond_stmt)->dest;
6203 gsi = gsi_last_bb (second_bb);
6204 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6206 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
6207 GSI_SAME_STMT);
6208 gassign *assign_stmt
6209 = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1));
6210 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6212 third_bb = split_block (second_bb, assign_stmt)->dest;
6213 gsi = gsi_last_bb (third_bb);
6214 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6216 t = build2 (MULT_EXPR, itype, q, threadid);
6217 t = build2 (PLUS_EXPR, itype, t, tt);
6218 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6220 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6221 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6223 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6224 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6226 /* Remove the GIMPLE_OMP_FOR statement. */
6227 gsi_remove (&gsi, true);
6229 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6230 gsi = gsi_start_bb (seq_start_bb);
6232 tree startvar = fd->loop.v;
6233 tree endvar = NULL_TREE;
6235 if (gimple_omp_for_combined_p (fd->for_stmt))
6237 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6238 ? gimple_omp_parallel_clauses (inner_stmt)
6239 : gimple_omp_for_clauses (inner_stmt);
6240 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6241 gcc_assert (innerc);
6242 startvar = OMP_CLAUSE_DECL (innerc);
6243 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6244 OMP_CLAUSE__LOOPTEMP_);
6245 gcc_assert (innerc);
6246 endvar = OMP_CLAUSE_DECL (innerc);
6248 t = fold_convert (itype, s0);
6249 t = fold_build2 (MULT_EXPR, itype, t, step);
6250 if (POINTER_TYPE_P (type))
6251 t = fold_build_pointer_plus (n1, t);
6252 else
6253 t = fold_build2 (PLUS_EXPR, type, t, n1);
6254 t = fold_convert (TREE_TYPE (startvar), t);
6255 t = force_gimple_operand_gsi (&gsi, t,
6256 DECL_P (startvar)
6257 && TREE_ADDRESSABLE (startvar),
6258 NULL_TREE, false, GSI_CONTINUE_LINKING);
6259 assign_stmt = gimple_build_assign (startvar, t);
6260 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6262 t = fold_convert (itype, e0);
6263 t = fold_build2 (MULT_EXPR, itype, t, step);
6264 if (POINTER_TYPE_P (type))
6265 t = fold_build_pointer_plus (n1, t);
6266 else
6267 t = fold_build2 (PLUS_EXPR, type, t, n1);
6268 t = fold_convert (TREE_TYPE (startvar), t);
6269 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6270 false, GSI_CONTINUE_LINKING);
6271 if (endvar)
6273 assign_stmt = gimple_build_assign (endvar, e);
6274 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6275 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6276 assign_stmt = gimple_build_assign (fd->loop.v, e);
6277 else
6278 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
6279 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6281 if (fd->collapse > 1)
6282 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6284 if (!broken_loop)
6286 /* The code controlling the sequential loop replaces the
6287 GIMPLE_OMP_CONTINUE. */
6288 gsi = gsi_last_bb (cont_bb);
6289 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
6290 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
6291 vmain = gimple_omp_continue_control_use (cont_stmt);
6292 vback = gimple_omp_continue_control_def (cont_stmt);
6294 if (!gimple_omp_for_combined_p (fd->for_stmt))
6296 if (POINTER_TYPE_P (type))
6297 t = fold_build_pointer_plus (vmain, step);
6298 else
6299 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6300 t = force_gimple_operand_gsi (&gsi, t,
6301 DECL_P (vback)
6302 && TREE_ADDRESSABLE (vback),
6303 NULL_TREE, true, GSI_SAME_STMT);
6304 assign_stmt = gimple_build_assign (vback, t);
6305 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6307 t = build2 (fd->loop.cond_code, boolean_type_node,
6308 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6309 ? t : vback, e);
6310 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6313 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6314 gsi_remove (&gsi, true);
6316 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6317 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6320 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6321 gsi = gsi_last_bb (exit_bb);
6322 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6324 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6325 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6327 gsi_remove (&gsi, true);
6329 /* Connect all the blocks. */
6330 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6331 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6332 ep = find_edge (entry_bb, second_bb);
6333 ep->flags = EDGE_TRUE_VALUE;
6334 ep->probability = REG_BR_PROB_BASE / 4;
6335 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6336 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6338 if (!broken_loop)
6340 ep = find_edge (cont_bb, body_bb);
6341 if (gimple_omp_for_combined_p (fd->for_stmt))
6343 remove_edge (ep);
6344 ep = NULL;
6346 else if (fd->collapse > 1)
6348 remove_edge (ep);
6349 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6351 else
6352 ep->flags = EDGE_TRUE_VALUE;
6353 find_edge (cont_bb, fin_bb)->flags
6354 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6357 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6358 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6359 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6361 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6362 recompute_dominator (CDI_DOMINATORS, body_bb));
6363 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6364 recompute_dominator (CDI_DOMINATORS, fin_bb));
6366 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6368 struct loop *loop = alloc_loop ();
6369 loop->header = body_bb;
6370 if (collapse_bb == NULL)
6371 loop->latch = cont_bb;
6372 add_loop (loop, body_bb->loop_father);
6377 /* A subroutine of expand_omp_for. Generate code for a parallel
6378 loop with static schedule and a specified chunk size. Given
6379 parameters:
6381 for (V = N1; V cond N2; V += STEP) BODY;
6383 where COND is "<" or ">", we generate pseudocode
6385 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6386 if (cond is <)
6387 adj = STEP - 1;
6388 else
6389 adj = STEP + 1;
6390 if ((__typeof (V)) -1 > 0 && cond is >)
6391 n = -(adj + N2 - N1) / -STEP;
6392 else
6393 n = (adj + N2 - N1) / STEP;
6394 trip = 0;
6395 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6396 here so that V is defined
6397 if the loop is not entered
6398 L0:
6399 s0 = (trip * nthreads + threadid) * CHUNK;
6400 e0 = min(s0 + CHUNK, n);
6401 if (s0 < n) goto L1; else goto L4;
6402 L1:
6403 V = s0 * STEP + N1;
6404 e = e0 * STEP + N1;
6405 L2:
6406 BODY;
6407 V += STEP;
6408 if (V cond e) goto L2; else goto L3;
6409 L3:
6410 trip += 1;
6411 goto L0;
6412 L4:
6413 */
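/* A worked example (illustrative numbers only): nthreads = 2,
   CHUNK = 2 and n = 10 gives thread 0 the chunks [0,2), [4,6) and
   [8,10) on trips 0, 1 and 2, while thread 1 runs [2,4) and [6,8)
   and then finds s0 = 10 >= n on trip 2 and exits.  */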
6415 static void
6416 expand_omp_for_static_chunk (struct omp_region *region,
6417 struct omp_for_data *fd, gimple inner_stmt)
6419 tree n, s0, e0, e, t;
6420 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6421 tree type, itype, vmain, vback, vextra;
6422 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6423 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6424 gimple_stmt_iterator gsi;
6425 edge se;
6426 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6427 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6428 bool broken_loop = region->cont == NULL;
6429 tree *counts = NULL;
6430 tree n1, n2, step;
6432 itype = type = TREE_TYPE (fd->loop.v);
6433 if (POINTER_TYPE_P (type))
6434 itype = signed_type_for (type);
6436 entry_bb = region->entry;
6437 se = split_block (entry_bb, last_stmt (entry_bb));
6438 entry_bb = se->src;
6439 iter_part_bb = se->dest;
6440 cont_bb = region->cont;
6441 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6442 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6443 gcc_assert (broken_loop
6444 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6445 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6446 body_bb = single_succ (seq_start_bb);
6447 if (!broken_loop)
6449 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6450 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6451 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6453 exit_bb = region->exit;
6455 /* Trip and adjustment setup goes in ENTRY_BB. */
6456 gsi = gsi_last_bb (entry_bb);
6457 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6459 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6461 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6462 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6465 if (fd->collapse > 1)
6467 int first_zero_iter = -1;
6468 basic_block l2_dom_bb = NULL;
6470 counts = XALLOCAVEC (tree, fd->collapse);
6471 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6472 fin_bb, first_zero_iter,
6473 l2_dom_bb);
6474 t = NULL_TREE;
6476 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6477 t = integer_one_node;
6478 else
6479 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6480 fold_convert (type, fd->loop.n1),
6481 fold_convert (type, fd->loop.n2));
6482 if (fd->collapse == 1
6483 && TYPE_UNSIGNED (type)
6484 && (t == NULL_TREE || !integer_onep (t)))
6486 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6487 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6488 true, GSI_SAME_STMT);
6489 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6490 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6491 true, GSI_SAME_STMT);
6492 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6493 NULL_TREE, NULL_TREE);
6494 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6495 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6496 expand_omp_regimplify_p, NULL, NULL)
6497 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6498 expand_omp_regimplify_p, NULL, NULL))
6500 gsi = gsi_for_stmt (cond_stmt);
6501 gimple_regimplify_operands (cond_stmt, &gsi);
6503 se = split_block (entry_bb, cond_stmt);
6504 se->flags = EDGE_TRUE_VALUE;
6505 entry_bb = se->dest;
6506 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6507 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6508 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6509 if (gimple_in_ssa_p (cfun))
6511 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6512 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
6513 !gsi_end_p (gpi); gsi_next (&gpi))
6515 gphi *phi = gpi.phi ();
6516 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6517 se, UNKNOWN_LOCATION);
6520 gsi = gsi_last_bb (entry_bb);
6523 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6524 t = fold_convert (itype, t);
6525 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6526 true, GSI_SAME_STMT);
6528 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6529 t = fold_convert (itype, t);
6530 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6531 true, GSI_SAME_STMT);
6533 n1 = fd->loop.n1;
6534 n2 = fd->loop.n2;
6535 step = fd->loop.step;
6536 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6538 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6539 OMP_CLAUSE__LOOPTEMP_);
6540 gcc_assert (innerc);
6541 n1 = OMP_CLAUSE_DECL (innerc);
6542 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6543 OMP_CLAUSE__LOOPTEMP_);
6544 gcc_assert (innerc);
6545 n2 = OMP_CLAUSE_DECL (innerc);
6547 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6548 true, NULL_TREE, true, GSI_SAME_STMT);
6549 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6550 true, NULL_TREE, true, GSI_SAME_STMT);
6551 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6552 true, NULL_TREE, true, GSI_SAME_STMT);
6553 fd->chunk_size
6554 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
6555 true, NULL_TREE, true, GSI_SAME_STMT);
6557 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6558 t = fold_build2 (PLUS_EXPR, itype, step, t);
6559 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6560 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6561 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6562 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6563 fold_build1 (NEGATE_EXPR, itype, t),
6564 fold_build1 (NEGATE_EXPR, itype, step));
6565 else
6566 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6567 t = fold_convert (itype, t);
6568 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6569 true, GSI_SAME_STMT);
6571 trip_var = create_tmp_reg (itype, ".trip");
6572 if (gimple_in_ssa_p (cfun))
6574 trip_init = make_ssa_name (trip_var);
6575 trip_main = make_ssa_name (trip_var);
6576 trip_back = make_ssa_name (trip_var);
6578 else
6580 trip_init = trip_var;
6581 trip_main = trip_var;
6582 trip_back = trip_var;
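/* A sketch of the three names: trip_init seeds the trip count on
   entry, trip_main is the value used inside ITER_PART_BB (in SSA
   form a PHI of trip_init and trip_back, created further below), and
   trip_back carries the incremented count out of TRIP_UPDATE_BB.  */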
6585 gassign *assign_stmt
6586 = gimple_build_assign (trip_init, build_int_cst (itype, 0));
6587 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6589 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6590 t = fold_build2 (MULT_EXPR, itype, t, step);
6591 if (POINTER_TYPE_P (type))
6592 t = fold_build_pointer_plus (n1, t);
6593 else
6594 t = fold_build2 (PLUS_EXPR, type, t, n1);
6595 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6596 true, GSI_SAME_STMT);
6598 /* Remove the GIMPLE_OMP_FOR. */
6599 gsi_remove (&gsi, true);
6601 /* Iteration space partitioning goes in ITER_PART_BB. */
6602 gsi = gsi_last_bb (iter_part_bb);
6604 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6605 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6606 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6607 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6608 false, GSI_CONTINUE_LINKING);
6610 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6611 t = fold_build2 (MIN_EXPR, itype, t, n);
6612 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6613 false, GSI_CONTINUE_LINKING);
6615 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6616 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6618 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6619 gsi = gsi_start_bb (seq_start_bb);
6621 tree startvar = fd->loop.v;
6622 tree endvar = NULL_TREE;
6624 if (gimple_omp_for_combined_p (fd->for_stmt))
6626 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6627 ? gimple_omp_parallel_clauses (inner_stmt)
6628 : gimple_omp_for_clauses (inner_stmt);
6629 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6630 gcc_assert (innerc);
6631 startvar = OMP_CLAUSE_DECL (innerc);
6632 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6633 OMP_CLAUSE__LOOPTEMP_);
6634 gcc_assert (innerc);
6635 endvar = OMP_CLAUSE_DECL (innerc);
6638 t = fold_convert (itype, s0);
6639 t = fold_build2 (MULT_EXPR, itype, t, step);
6640 if (POINTER_TYPE_P (type))
6641 t = fold_build_pointer_plus (n1, t);
6642 else
6643 t = fold_build2 (PLUS_EXPR, type, t, n1);
6644 t = fold_convert (TREE_TYPE (startvar), t);
6645 t = force_gimple_operand_gsi (&gsi, t,
6646 DECL_P (startvar)
6647 && TREE_ADDRESSABLE (startvar),
6648 NULL_TREE, false, GSI_CONTINUE_LINKING);
6649 assign_stmt = gimple_build_assign (startvar, t);
6650 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6652 t = fold_convert (itype, e0);
6653 t = fold_build2 (MULT_EXPR, itype, t, step);
6654 if (POINTER_TYPE_P (type))
6655 t = fold_build_pointer_plus (n1, t);
6656 else
6657 t = fold_build2 (PLUS_EXPR, type, t, n1);
6658 t = fold_convert (TREE_TYPE (startvar), t);
6659 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6660 false, GSI_CONTINUE_LINKING);
6661 if (endvar)
6663 assign_stmt = gimple_build_assign (endvar, e);
6664 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6665 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6666 assign_stmt = gimple_build_assign (fd->loop.v, e);
6667 else
6668 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
6669 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6671 if (fd->collapse > 1)
6672 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6674 if (!broken_loop)
6676 /* The code controlling the sequential loop goes in CONT_BB,
6677 replacing the GIMPLE_OMP_CONTINUE. */
6678 gsi = gsi_last_bb (cont_bb);
6679 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
6680 vmain = gimple_omp_continue_control_use (cont_stmt);
6681 vback = gimple_omp_continue_control_def (cont_stmt);
6683 if (!gimple_omp_for_combined_p (fd->for_stmt))
6685 if (POINTER_TYPE_P (type))
6686 t = fold_build_pointer_plus (vmain, step);
6687 else
6688 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6689 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
6690 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6691 true, GSI_SAME_STMT);
6692 assign_stmt = gimple_build_assign (vback, t);
6693 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6695 t = build2 (fd->loop.cond_code, boolean_type_node,
6696 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6697 ? t : vback, e);
6698 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6701 /* Remove GIMPLE_OMP_CONTINUE. */
6702 gsi_remove (&gsi, true);
6704 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6705 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6707 /* Trip update code goes into TRIP_UPDATE_BB. */
6708 gsi = gsi_start_bb (trip_update_bb);
6710 t = build_int_cst (itype, 1);
6711 t = build2 (PLUS_EXPR, itype, trip_main, t);
6712 assign_stmt = gimple_build_assign (trip_back, t);
6713 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6716 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6717 gsi = gsi_last_bb (exit_bb);
6718 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6720 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6721 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6723 gsi_remove (&gsi, true);
6725 /* Connect the new blocks. */
6726 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6727 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6729 if (!broken_loop)
6731 se = find_edge (cont_bb, body_bb);
6732 if (gimple_omp_for_combined_p (fd->for_stmt))
6734 remove_edge (se);
6735 se = NULL;
6737 else if (fd->collapse > 1)
6739 remove_edge (se);
6740 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6742 else
6743 se->flags = EDGE_TRUE_VALUE;
6744 find_edge (cont_bb, trip_update_bb)->flags
6745 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6747 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6750 if (gimple_in_ssa_p (cfun))
6752 gphi_iterator psi;
6753 gphi *phi;
6754 edge re, ene;
6755 edge_var_map *vm;
6756 size_t i;
6758 gcc_assert (fd->collapse == 1 && !broken_loop);
6760 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6761 remove arguments of the phi nodes in fin_bb. We need to create
6762 appropriate phi nodes in iter_part_bb instead. */
6763 se = single_pred_edge (fin_bb);
6764 re = single_succ_edge (trip_update_bb);
6765 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
6766 ene = single_succ_edge (entry_bb);
6768 psi = gsi_start_phis (fin_bb);
6769 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6770 gsi_next (&psi), ++i)
6772 gphi *nphi;
6773 source_location locus;
6775 phi = psi.phi ();
6776 t = gimple_phi_result (phi);
6777 gcc_assert (t == redirect_edge_var_map_result (vm));
6778 nphi = create_phi_node (t, iter_part_bb);
6780 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6781 locus = gimple_phi_arg_location_from_edge (phi, se);
6783 /* A special case -- fd->loop.v is not yet computed in
6784 iter_part_bb, we need to use vextra instead. */
6785 if (t == fd->loop.v)
6786 t = vextra;
6787 add_phi_arg (nphi, t, ene, locus);
6788 locus = redirect_edge_var_map_location (vm);
6789 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6791 gcc_assert (gsi_end_p (psi) && i == head->length ());
6792 redirect_edge_var_map_clear (re);
6793 while (1)
6795 psi = gsi_start_phis (fin_bb);
6796 if (gsi_end_p (psi))
6797 break;
6798 remove_phi_node (&psi, false);
6801 /* Make phi node for trip. */
6802 phi = create_phi_node (trip_main, iter_part_bb);
6803 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6804 UNKNOWN_LOCATION);
6805 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6806 UNKNOWN_LOCATION);
6809 if (!broken_loop)
6810 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6811 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6812 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6813 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6814 recompute_dominator (CDI_DOMINATORS, fin_bb));
6815 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6816 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6817 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6818 recompute_dominator (CDI_DOMINATORS, body_bb));
6820 if (!broken_loop)
6822 struct loop *trip_loop = alloc_loop ();
6823 trip_loop->header = iter_part_bb;
6824 trip_loop->latch = trip_update_bb;
6825 add_loop (trip_loop, iter_part_bb->loop_father);
6827 if (!gimple_omp_for_combined_p (fd->for_stmt))
6829 struct loop *loop = alloc_loop ();
6830 loop->header = body_bb;
6831 if (collapse_bb == NULL)
6832 loop->latch = cont_bb;
6833 add_loop (loop, trip_loop);
6838 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
6839 Given parameters:
6840 for (V = N1; V cond N2; V += STEP) BODY;
6842 where COND is "<" or ">" or "!=", we generate pseudocode
6844 for (ind_var = low; ind_var < high; ind_var++)
6846 V = n1 + (ind_var * STEP)
6848 <BODY>
6851 In the above pseudocode, low and high are function parameters of the
6852 child function. In the function below, we insert a temporary
6853 variable plus calls to two OMP functions that cannot otherwise
6854 appear in the body of _Cilk_for (since OMP_FOR cannot be mixed
6855 with _Cilk_for). These calls are replaced with low and high
6856 by the function that handles taskreg. */
6859 static void
6860 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
6862 bool broken_loop = region->cont == NULL;
6863 basic_block entry_bb = region->entry;
6864 basic_block cont_bb = region->cont;
6866 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6867 gcc_assert (broken_loop
6868 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6869 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6870 basic_block l1_bb, l2_bb;
6872 if (!broken_loop)
6874 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6875 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6876 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6877 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6879 else
6881 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6882 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6883 l2_bb = single_succ (l1_bb);
6885 basic_block exit_bb = region->exit;
6886 basic_block l2_dom_bb = NULL;
6888 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
6890 /* The statements below, up to "tree high_val = ...", are pseudo
6891 statements used to pass information to expand_omp_taskreg.
6892 low_val and high_val will be replaced by the __low and __high
6893 parameters from the child function.
6895 The call_exprs part is a place-holder; it is mainly used to
6896 distinctly identify to the top-level part that this is where we
6897 should put low and high (reasoning given in the header
6898 comment). */
6900 tree child_fndecl
6901 = gimple_omp_parallel_child_fn (
6902 as_a <gomp_parallel *> (last_stmt (region->outer->entry)));
6903 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
6904 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
6906 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
6907 high_val = t;
6908 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
6909 low_val = t;
6911 gcc_assert (low_val && high_val);
6913 tree type = TREE_TYPE (low_val);
6914 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
6915 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6917 /* Not needed in SSA form right now. */
6918 gcc_assert (!gimple_in_ssa_p (cfun));
6919 if (l2_dom_bb == NULL)
6920 l2_dom_bb = l1_bb;
6922 tree n1 = low_val;
6923 tree n2 = high_val;
6925 gimple stmt = gimple_build_assign (ind_var, n1);
6927 /* Replace the GIMPLE_OMP_FOR statement. */
6928 gsi_replace (&gsi, stmt, true);
6930 if (!broken_loop)
6932 /* Code to control the increment goes in the CONT_BB. */
6933 gsi = gsi_last_bb (cont_bb);
6934 stmt = gsi_stmt (gsi);
6935 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6936 stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var,
6937 build_one_cst (type));
6939 /* Replace GIMPLE_OMP_CONTINUE. */
6940 gsi_replace (&gsi, stmt, true);
6943 /* Emit the condition in L1_BB. */
6944 gsi = gsi_after_labels (l1_bb);
6945 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
6946 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
6947 fd->loop.step);
6948 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
6949 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6950 fd->loop.n1, fold_convert (sizetype, t));
6951 else
6952 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6953 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
6954 t = fold_convert (TREE_TYPE (fd->loop.v), t);
6955 expand_omp_build_assign (&gsi, fd->loop.v, t);
6957 /* The condition is always '<' since the runtime will fill in the low
6958 and high values. */
6959 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
6960 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6962 /* Remove GIMPLE_OMP_RETURN. */
6963 gsi = gsi_last_bb (exit_bb);
6964 gsi_remove (&gsi, true);
6966 /* Connect the new blocks. */
6967 remove_edge (FALLTHRU_EDGE (entry_bb));
6969 edge e, ne;
6970 if (!broken_loop)
6972 remove_edge (BRANCH_EDGE (entry_bb));
6973 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6975 e = BRANCH_EDGE (l1_bb);
6976 ne = FALLTHRU_EDGE (l1_bb);
6977 e->flags = EDGE_TRUE_VALUE;
6979 else
6981 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6983 ne = single_succ_edge (l1_bb);
6984 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6987 ne->flags = EDGE_FALSE_VALUE;
6988 e->probability = REG_BR_PROB_BASE * 7 / 8;
6989 ne->probability = REG_BR_PROB_BASE / 8;
6991 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6992 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6993 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6995 if (!broken_loop)
6997 struct loop *loop = alloc_loop ();
6998 loop->header = l1_bb;
6999 loop->latch = cont_bb;
7000 add_loop (loop, l1_bb->loop_father);
7001 loop->safelen = INT_MAX;
7004 /* Pick the correct library function based on the precision of the
7005 induction variable type. */
7006 tree lib_fun = NULL_TREE;
7007 if (TYPE_PRECISION (type) == 32)
7008 lib_fun = cilk_for_32_fndecl;
7009 else if (TYPE_PRECISION (type) == 64)
7010 lib_fun = cilk_for_64_fndecl;
7011 else
7012 gcc_unreachable ();
7014 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
7016 /* WS_ARGS contains the library function flavor to call:
7017 __libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32), and the
7018 user-defined grain value. If the user does not define one, then zero
7019 is passed in by the parser. */
7020 vec_alloc (region->ws_args, 2);
7021 region->ws_args->quick_push (lib_fun);
7022 region->ws_args->quick_push (fd->chunk_size);
7025 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
7026 loop. Given parameters:
7028 for (V = N1; V cond N2; V += STEP) BODY;
7030 where COND is "<" or ">", we generate pseudocode
7032 V = N1;
7033 goto L1;
7034 L0:
7035 BODY;
7036 V += STEP;
7037 L1:
7038 if (V cond N2) goto L0; else goto L2;
7039 L2:
7041 For collapsed loops, given parameters:
7042 collapse(3)
7043 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
7044 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
7045 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
7046 BODY;
7048 we generate pseudocode
7050 if (cond3 is <)
7051 adj = STEP3 - 1;
7052 else
7053 adj = STEP3 + 1;
7054 count3 = (adj + N32 - N31) / STEP3;
7055 if (cond2 is <)
7056 adj = STEP2 - 1;
7057 else
7058 adj = STEP2 + 1;
7059 count2 = (adj + N22 - N21) / STEP2;
7060 if (cond1 is <)
7061 adj = STEP1 - 1;
7062 else
7063 adj = STEP1 + 1;
7064 count1 = (adj + N12 - N11) / STEP1;
7065 count = count1 * count2 * count3;
7066 V = 0;
7067 V1 = N11;
7068 V2 = N21;
7069 V3 = N31;
7070 goto L1;
7071 L0:
7072 BODY;
7073 V += 1;
7074 V3 += STEP3;
7075 V2 += (V3 cond3 N32) ? 0 : STEP2;
7076 V3 = (V3 cond3 N32) ? V3 : N31;
7077 V1 += (V2 cond2 N22) ? 0 : STEP1;
7078 V2 = (V2 cond2 N22) ? V2 : N21;
7079 L1:
7080 if (V < count) goto L0; else goto L2;
7081 L2:
7082 */
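/* E.g. (illustrative numbers only), with count1 = 2, count2 = 3 and
   count3 = 4, count = 24: V runs 0..23 while V3 advances every
   iteration and wraps back to N31 every 4 steps, V2 steps every 4th
   iteration and wraps every 12, and V1 steps every 12th.  */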
7085 static void
7086 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
7088 tree type, t;
7089 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7090 gimple_stmt_iterator gsi;
7091 gimple stmt;
7092 gcond *cond_stmt;
7093 bool broken_loop = region->cont == NULL;
7094 edge e, ne;
7095 tree *counts = NULL;
7096 int i;
7097 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7098 OMP_CLAUSE_SAFELEN);
7099 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7100 OMP_CLAUSE__SIMDUID_);
7101 tree n1, n2;
7103 type = TREE_TYPE (fd->loop.v);
7104 entry_bb = region->entry;
7105 cont_bb = region->cont;
7106 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7107 gcc_assert (broken_loop
7108 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7109 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7110 if (!broken_loop)
7112 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7113 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7114 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7115 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7117 else
7119 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7120 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7121 l2_bb = single_succ (l1_bb);
7123 exit_bb = region->exit;
7124 l2_dom_bb = NULL;
7126 gsi = gsi_last_bb (entry_bb);
7128 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7129 /* Not needed in SSA form right now. */
7130 gcc_assert (!gimple_in_ssa_p (cfun));
7131 if (fd->collapse > 1)
7133 int first_zero_iter = -1;
7134 basic_block zero_iter_bb = l2_bb;
7136 counts = XALLOCAVEC (tree, fd->collapse);
7137 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7138 zero_iter_bb, first_zero_iter,
7139 l2_dom_bb);
7141 if (l2_dom_bb == NULL)
7142 l2_dom_bb = l1_bb;
7144 n1 = fd->loop.n1;
7145 n2 = fd->loop.n2;
7146 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7148 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7149 OMP_CLAUSE__LOOPTEMP_);
7150 gcc_assert (innerc);
7151 n1 = OMP_CLAUSE_DECL (innerc);
7152 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7153 OMP_CLAUSE__LOOPTEMP_);
7154 gcc_assert (innerc);
7155 n2 = OMP_CLAUSE_DECL (innerc);
7156 expand_omp_build_assign (&gsi, fd->loop.v,
7157 fold_convert (type, n1));
7158 if (fd->collapse > 1)
7160 gsi_prev (&gsi);
7161 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7162 gsi_next (&gsi);
7165 else
7167 expand_omp_build_assign (&gsi, fd->loop.v,
7168 fold_convert (type, fd->loop.n1));
7169 if (fd->collapse > 1)
7170 for (i = 0; i < fd->collapse; i++)
7172 tree itype = TREE_TYPE (fd->loops[i].v);
7173 if (POINTER_TYPE_P (itype))
7174 itype = signed_type_for (itype);
7175 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7176 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7180 /* Remove the GIMPLE_OMP_FOR statement. */
7181 gsi_remove (&gsi, true);
7183 if (!broken_loop)
7185 /* Code to control the increment goes in the CONT_BB. */
7186 gsi = gsi_last_bb (cont_bb);
7187 stmt = gsi_stmt (gsi);
7188 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7190 if (POINTER_TYPE_P (type))
7191 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7192 else
7193 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7194 expand_omp_build_assign (&gsi, fd->loop.v, t);
7196 if (fd->collapse > 1)
7198 i = fd->collapse - 1;
7199 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7201 t = fold_convert (sizetype, fd->loops[i].step);
7202 t = fold_build_pointer_plus (fd->loops[i].v, t);
7204 else
7206 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7207 fd->loops[i].step);
7208 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7209 fd->loops[i].v, t);
7211 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7213 for (i = fd->collapse - 1; i > 0; i--)
7215 tree itype = TREE_TYPE (fd->loops[i].v);
7216 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7217 if (POINTER_TYPE_P (itype2))
7218 itype2 = signed_type_for (itype2);
7219 t = build3 (COND_EXPR, itype2,
7220 build2 (fd->loops[i].cond_code, boolean_type_node,
7221 fd->loops[i].v,
7222 fold_convert (itype, fd->loops[i].n2)),
7223 build_int_cst (itype2, 0),
7224 fold_convert (itype2, fd->loops[i - 1].step));
7225 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7226 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7227 else
7228 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7229 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7231 t = build3 (COND_EXPR, itype,
7232 build2 (fd->loops[i].cond_code, boolean_type_node,
7233 fd->loops[i].v,
7234 fold_convert (itype, fd->loops[i].n2)),
7235 fd->loops[i].v,
7236 fold_convert (itype, fd->loops[i].n1));
7237 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7241 /* Remove GIMPLE_OMP_CONTINUE. */
7242 gsi_remove (&gsi, true);
7245 /* Emit the condition in L1_BB. */
7246 gsi = gsi_start_bb (l1_bb);
7248 t = fold_convert (type, n2);
7249 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7250 false, GSI_CONTINUE_LINKING);
7251 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7252 cond_stmt = gimple_build_cond_empty (t);
7253 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
7254 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
7255 NULL, NULL)
7256 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
7257 NULL, NULL))
7259 gsi = gsi_for_stmt (cond_stmt);
7260 gimple_regimplify_operands (cond_stmt, &gsi);
7263 /* Remove GIMPLE_OMP_RETURN. */
7264 gsi = gsi_last_bb (exit_bb);
7265 gsi_remove (&gsi, true);
7267 /* Connect the new blocks. */
7268 remove_edge (FALLTHRU_EDGE (entry_bb));
7270 if (!broken_loop)
7272 remove_edge (BRANCH_EDGE (entry_bb));
7273 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7275 e = BRANCH_EDGE (l1_bb);
7276 ne = FALLTHRU_EDGE (l1_bb);
7277 e->flags = EDGE_TRUE_VALUE;
7279 else
7281 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7283 ne = single_succ_edge (l1_bb);
7284 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7287 ne->flags = EDGE_FALSE_VALUE;
7288 e->probability = REG_BR_PROB_BASE * 7 / 8;
7289 ne->probability = REG_BR_PROB_BASE / 8;
7291 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7292 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7293 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7295 if (!broken_loop)
7297 struct loop *loop = alloc_loop ();
7298 loop->header = l1_bb;
7299 loop->latch = cont_bb;
7300 add_loop (loop, l1_bb->loop_father);
7301 if (safelen == NULL_TREE)
7302 loop->safelen = INT_MAX;
7303 else
7305 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7306 if (TREE_CODE (safelen) != INTEGER_CST)
7307 loop->safelen = 0;
7308 else if (!tree_fits_uhwi_p (safelen)
7309 || tree_to_uhwi (safelen) > INT_MAX)
7310 loop->safelen = INT_MAX;
7311 else
7312 loop->safelen = tree_to_uhwi (safelen);
7313 if (loop->safelen == 1)
7314 loop->safelen = 0;
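/* Summarizing the mapping: no safelen clause means no limit
   (INT_MAX); a non-constant expression records nothing (0); an
   oversized constant is clamped to INT_MAX; and safelen (1) promises
   nothing useful, so it is dropped to 0 as well.  */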
7316 if (simduid)
7318 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7319 cfun->has_simduid_loops = true;
7321 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7322 the loop. */
7323 if ((flag_tree_loop_vectorize
7324 || (!global_options_set.x_flag_tree_loop_vectorize
7325 && !global_options_set.x_flag_tree_vectorize))
7326 && flag_tree_loop_optimize
7327 && loop->safelen > 1)
7329 loop->force_vectorize = true;
7330 cfun->has_force_vectorize_loops = true;
7336 /* Expand the OpenMP loop defined by REGION. */
7338 static void
7339 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7341 struct omp_for_data fd;
7342 struct omp_for_data_loop *loops;
7344 loops
7345 = (struct omp_for_data_loop *)
7346 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7347 * sizeof (struct omp_for_data_loop));
7348 extract_omp_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
7349 &fd, loops);
7350 region->sched_kind = fd.sched_kind;
7352 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7353 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7354 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7355 if (region->cont)
7357 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7358 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7359 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7361 else
7362 /* If there isn't a continue then this is a degenerate case where
7363 the introduction of abnormal edges during lowering will prevent
7364 original loops from being detected. Fix that up. */
7365 loops_state_set (LOOPS_NEED_FIXUP);
7367 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7368 expand_omp_simd (region, &fd);
7369 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7370 expand_cilk_for (region, &fd);
7371 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7372 && !fd.have_ordered)
7374 if (fd.chunk_size == NULL)
7375 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7376 else
7377 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7379 else
7381 int fn_index, start_ix, next_ix;
7383 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7384 == GF_OMP_FOR_KIND_FOR);
7385 if (fd.chunk_size == NULL
7386 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7387 fd.chunk_size = integer_zero_node;
7388 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
7389 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7390 ? 3 : fd.sched_kind;
7391 fn_index += fd.have_ordered * 4;
7392 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7393 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
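/* Illustrative example, relying on the GOMP_LOOP_* builtins being
   declared in schedule order (static, dynamic, guided, runtime) with
   the ordered variants following: schedule(dynamic) with ordered
   gives fn_index = 1 + 4 = 5, i.e. GOMP_loop_ordered_dynamic_start
   and GOMP_loop_ordered_dynamic_next.  */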
7394 if (fd.iter_type == long_long_unsigned_type_node)
7396 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7397 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7398 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7399 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7401 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7402 (enum built_in_function) next_ix, inner_stmt);
7405 if (gimple_in_ssa_p (cfun))
7406 update_ssa (TODO_update_ssa_only_virtuals);
7410 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7412 v = GOMP_sections_start (n);
7413 L0:
7414 switch (v)
7415 {
7416 case 0:
7417 goto L2;
7418 case 1:
7419 section 1;
7420 goto L1;
7421 case 2:
7422 ...
7423 case n:
7424 ...
7425 default:
7426 abort ();
7427 }
7428 L1:
7429 v = GOMP_sections_next ();
7430 goto L0;
7431 L2:
7432 reduction;
7434 If this is a combined parallel sections construct, replace the call
7435 to GOMP_sections_start with a call to GOMP_sections_next. */
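/* E.g. a sections construct with two section blocks produces a
   switch over v in {0, 1, 2}: cases 1 and 2 run the section bodies,
   case 0 jumps to the exit, and the default aborts.  Each thread
   keeps calling GOMP_sections_next until it returns 0.  */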
7437 static void
7438 expand_omp_sections (struct omp_region *region)
7440 tree t, u, vin = NULL, vmain, vnext, l2;
7441 unsigned len;
7442 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7443 gimple_stmt_iterator si, switch_si;
7444 gomp_sections *sections_stmt;
7445 gimple stmt;
7446 gomp_continue *cont;
7447 edge_iterator ei;
7448 edge e;
7449 struct omp_region *inner;
7450 unsigned i, casei;
7451 bool exit_reachable = region->cont != NULL;
7453 gcc_assert (region->exit != NULL);
7454 entry_bb = region->entry;
7455 l0_bb = single_succ (entry_bb);
7456 l1_bb = region->cont;
7457 l2_bb = region->exit;
7458 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7459 l2 = gimple_block_label (l2_bb);
7460 else
7462 /* This can happen if there are reductions. */
7463 len = EDGE_COUNT (l0_bb->succs);
7464 gcc_assert (len > 0);
7465 e = EDGE_SUCC (l0_bb, len - 1);
7466 si = gsi_last_bb (e->dest);
7467 l2 = NULL_TREE;
7468 if (gsi_end_p (si)
7469 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7470 l2 = gimple_block_label (e->dest);
7471 else
7472 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7474 si = gsi_last_bb (e->dest);
7475 if (gsi_end_p (si)
7476 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7478 l2 = gimple_block_label (e->dest);
7479 break;
7483 if (exit_reachable)
7484 default_bb = create_empty_bb (l1_bb->prev_bb);
7485 else
7486 default_bb = create_empty_bb (l0_bb);
7488 /* We will build a switch() with enough cases for all the
7489 GIMPLE_OMP_SECTION regions, a '0' case taken when there is no more
7490 work, and a default case to abort if something goes wrong. */
7491 len = EDGE_COUNT (l0_bb->succs);
7493 /* Use vec::quick_push on label_vec throughout, since we know the size
7494 in advance. */
7495 auto_vec<tree> label_vec (len);
7497 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7498 GIMPLE_OMP_SECTIONS statement. */
7499 si = gsi_last_bb (entry_bb);
7500 sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
7501 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7502 vin = gimple_omp_sections_control (sections_stmt);
7503 if (!is_combined_parallel (region))
7505 /* If we are not inside a combined parallel+sections region,
7506 call GOMP_sections_start. */
7507 t = build_int_cst (unsigned_type_node, len - 1);
7508 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7509 stmt = gimple_build_call (u, 1, t);
7511 else
7513 /* Otherwise, call GOMP_sections_next. */
7514 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7515 stmt = gimple_build_call (u, 0);
7517 gimple_call_set_lhs (stmt, vin);
7518 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7519 gsi_remove (&si, true);
7521 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7522 L0_BB. */
7523 switch_si = gsi_last_bb (l0_bb);
7524 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7525 if (exit_reachable)
7527 cont = as_a <gomp_continue *> (last_stmt (l1_bb));
7528 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7529 vmain = gimple_omp_continue_control_use (cont);
7530 vnext = gimple_omp_continue_control_def (cont);
7532 else
7534 vmain = vin;
7535 vnext = NULL_TREE;
7538 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7539 label_vec.quick_push (t);
7540 i = 1;
7542 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7543 for (inner = region->inner, casei = 1;
7544 inner;
7545 inner = inner->next, i++, casei++)
7547 basic_block s_entry_bb, s_exit_bb;
7549 /* Skip optional reduction region. */
7550 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7552 --i;
7553 --casei;
7554 continue;
7557 s_entry_bb = inner->entry;
7558 s_exit_bb = inner->exit;
7560 t = gimple_block_label (s_entry_bb);
7561 u = build_int_cst (unsigned_type_node, casei);
7562 u = build_case_label (u, NULL, t);
7563 label_vec.quick_push (u);
7565 si = gsi_last_bb (s_entry_bb);
7566 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7567 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7568 gsi_remove (&si, true);
7569 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7571 if (s_exit_bb == NULL)
7572 continue;
7574 si = gsi_last_bb (s_exit_bb);
7575 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7576 gsi_remove (&si, true);
7578 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7581 /* Error handling code goes in DEFAULT_BB. */
7582 t = gimple_block_label (default_bb);
7583 u = build_case_label (NULL, NULL, t);
7584 make_edge (l0_bb, default_bb, 0);
7585 add_bb_to_loop (default_bb, current_loops->tree_root);
7587 stmt = gimple_build_switch (vmain, u, label_vec);
7588 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7589 gsi_remove (&switch_si, true);
7591 si = gsi_start_bb (default_bb);
7592 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7593 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7595 if (exit_reachable)
7597 tree bfn_decl;
7599 /* Code to get the next section goes in L1_BB. */
7600 si = gsi_last_bb (l1_bb);
7601 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7603 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7604 stmt = gimple_build_call (bfn_decl, 0);
7605 gimple_call_set_lhs (stmt, vnext);
7606 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7607 gsi_remove (&si, true);
7609 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7612 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7613 si = gsi_last_bb (l2_bb);
7614 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7615 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7616 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7617 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7618 else
7619 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7620 stmt = gimple_build_call (t, 0);
7621 if (gimple_omp_return_lhs (gsi_stmt (si)))
7622 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7623 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7624 gsi_remove (&si, true);
7626 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7630 /* Expand code for an OpenMP single directive. We've already expanded
7631 much of the code; here we simply place the GOMP_barrier call. */
7633 static void
7634 expand_omp_single (struct omp_region *region)
7636 basic_block entry_bb, exit_bb;
7637 gimple_stmt_iterator si;
7639 entry_bb = region->entry;
7640 exit_bb = region->exit;
7642 si = gsi_last_bb (entry_bb);
7643 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7644 gsi_remove (&si, true);
7645 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7647 si = gsi_last_bb (exit_bb);
7648 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7650 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7651 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7653 gsi_remove (&si, true);
7654 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7658 /* Generic expansion for OpenMP synchronization directives: master,
7659 ordered and critical. All we need to do here is remove the entry
7660 and exit markers for REGION. */
7662 static void
7663 expand_omp_synch (struct omp_region *region)
7665 basic_block entry_bb, exit_bb;
7666 gimple_stmt_iterator si;
7668 entry_bb = region->entry;
7669 exit_bb = region->exit;
7671 si = gsi_last_bb (entry_bb);
7672 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7673 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7674 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7675 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7676 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7677 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7678 gsi_remove (&si, true);
7679 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7681 if (exit_bb)
7683 si = gsi_last_bb (exit_bb);
7684 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7685 gsi_remove (&si, true);
7686 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7690 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7691 operation as a normal volatile load. */
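/* Illustrative sketch (example source assumed, 4-byte type):

     #pragma omp atomic read
     v = x;

   is implemented as roughly

     v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   with MEMMODEL_SEQ_CST used instead when the seq_cst clause is present,
   as selected below via gimple_omp_atomic_seq_cst_p.  */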
7693 static bool
7694 expand_omp_atomic_load (basic_block load_bb, tree addr,
7695 tree loaded_val, int index)
7697 enum built_in_function tmpbase;
7698 gimple_stmt_iterator gsi;
7699 basic_block store_bb;
7700 location_t loc;
7701 gimple stmt;
7702 tree decl, call, type, itype;
7704 gsi = gsi_last_bb (load_bb);
7705 stmt = gsi_stmt (gsi);
7706 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7707 loc = gimple_location (stmt);
7709 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7710 is smaller than word size, then expand_atomic_load assumes that the load
7711 is atomic. We could avoid the builtin entirely in this case. */
7713 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7714 decl = builtin_decl_explicit (tmpbase);
7715 if (decl == NULL_TREE)
7716 return false;
7718 type = TREE_TYPE (loaded_val);
7719 itype = TREE_TYPE (TREE_TYPE (decl));
7721 call = build_call_expr_loc (loc, decl, 2, addr,
7722 build_int_cst (NULL,
7723 gimple_omp_atomic_seq_cst_p (stmt)
7724 ? MEMMODEL_SEQ_CST
7725 : MEMMODEL_RELAXED));
7726 if (!useless_type_conversion_p (type, itype))
7727 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7728 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7730 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7731 gsi_remove (&gsi, true);
7733 store_bb = single_succ (load_bb);
7734 gsi = gsi_last_bb (store_bb);
7735 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7736 gsi_remove (&gsi, true);
7738 if (gimple_in_ssa_p (cfun))
7739 update_ssa (TODO_update_ssa_no_phi);
7741 return true;
7744 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7745 operation as a normal volatile store. */
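/* Illustrative sketch (example source assumed, 4-byte type):

     #pragma omp atomic write
     x = expr;

   becomes roughly

     __atomic_store_4 (&x, expr, MEMMODEL_RELAXED);

   whereas a capture form that still needs the previous value, e.g.

     #pragma omp atomic capture
     { v = x; x = expr; }

   becomes v = __atomic_exchange_4 (&x, expr, MEMMODEL_RELAXED).  */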
7747 static bool
7748 expand_omp_atomic_store (basic_block load_bb, tree addr,
7749 tree loaded_val, tree stored_val, int index)
7751 enum built_in_function tmpbase;
7752 gimple_stmt_iterator gsi;
7753 basic_block store_bb = single_succ (load_bb);
7754 location_t loc;
7755 gimple stmt;
7756 tree decl, call, type, itype;
7757 machine_mode imode;
7758 bool exchange;
7760 gsi = gsi_last_bb (load_bb);
7761 stmt = gsi_stmt (gsi);
7762 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7764 /* If the load value is needed, then this isn't a store but an exchange. */
7765 exchange = gimple_omp_atomic_need_value_p (stmt);
7767 gsi = gsi_last_bb (store_bb);
7768 stmt = gsi_stmt (gsi);
7769 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7770 loc = gimple_location (stmt);
7772 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7773 is smaller than word size, then expand_atomic_store assumes that the store
7774 is atomic. We could avoid the builtin entirely in this case. */
7776 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7777 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7778 decl = builtin_decl_explicit (tmpbase);
7779 if (decl == NULL_TREE)
7780 return false;
7782 type = TREE_TYPE (stored_val);
7784 /* Dig out the type of the function's second argument. */
7785 itype = TREE_TYPE (decl);
7786 itype = TYPE_ARG_TYPES (itype);
7787 itype = TREE_CHAIN (itype);
7788 itype = TREE_VALUE (itype);
7789 imode = TYPE_MODE (itype);
7791 if (exchange && !can_atomic_exchange_p (imode, true))
7792 return false;
7794 if (!useless_type_conversion_p (itype, type))
7795 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7796 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7797 build_int_cst (NULL,
7798 gimple_omp_atomic_seq_cst_p (stmt)
7799 ? MEMMODEL_SEQ_CST
7800 : MEMMODEL_RELAXED));
7801 if (exchange)
7803 if (!useless_type_conversion_p (type, itype))
7804 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7805 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7808 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7809 gsi_remove (&gsi, true);
7811 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7812 gsi = gsi_last_bb (load_bb);
7813 gsi_remove (&gsi, true);
7815 if (gimple_in_ssa_p (cfun))
7816 update_ssa (TODO_update_ssa_no_phi);
7818 return true;
7821 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7822 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7823 size of the data type, and thus usable to find the index of the builtin
7824 decl. Returns false if the expression is not of the proper form. */
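/* For example (an illustrative assumption, not code from this file):

     #pragma omp atomic
     x += n;

   matches the pattern described below and, for a 4-byte type, is emitted as

     __atomic_fetch_add_4 (&x, n, MEMMODEL_RELAXED);

   while a capture of the updated value, "v = x += n;", selects the
   ADD_FETCH variant so that v receives the new value instead.  */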
7826 static bool
7827 expand_omp_atomic_fetch_op (basic_block load_bb,
7828 tree addr, tree loaded_val,
7829 tree stored_val, int index)
7831 enum built_in_function oldbase, newbase, tmpbase;
7832 tree decl, itype, call;
7833 tree lhs, rhs;
7834 basic_block store_bb = single_succ (load_bb);
7835 gimple_stmt_iterator gsi;
7836 gimple stmt;
7837 location_t loc;
7838 enum tree_code code;
7839 bool need_old, need_new;
7840 machine_mode imode;
7841 bool seq_cst;
7843 /* We expect to find the following sequences:
7845 load_bb:
7846 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7848 store_bb:
7849 val = tmp OP something; (or: something OP tmp)
7850 GIMPLE_OMP_ATOMIC_STORE (val)
7852 ???FIXME: Allow a more flexible sequence.
7853 Perhaps use data flow to pick the statements.
7857 gsi = gsi_after_labels (store_bb);
7858 stmt = gsi_stmt (gsi);
7859 loc = gimple_location (stmt);
7860 if (!is_gimple_assign (stmt))
7861 return false;
7862 gsi_next (&gsi);
7863 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7864 return false;
7865 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7866 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7867 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7868 gcc_checking_assert (!need_old || !need_new);
7870 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7871 return false;
7873 /* Check for one of the supported fetch-op operations. */
7874 code = gimple_assign_rhs_code (stmt);
7875 switch (code)
7877 case PLUS_EXPR:
7878 case POINTER_PLUS_EXPR:
7879 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7880 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7881 break;
7882 case MINUS_EXPR:
7883 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7884 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7885 break;
7886 case BIT_AND_EXPR:
7887 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7888 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7889 break;
7890 case BIT_IOR_EXPR:
7891 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7892 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7893 break;
7894 case BIT_XOR_EXPR:
7895 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7896 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7897 break;
7898 default:
7899 return false;
7902 /* Make sure the expression is of the proper form. */
7903 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7904 rhs = gimple_assign_rhs2 (stmt);
7905 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7906 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7907 rhs = gimple_assign_rhs1 (stmt);
7908 else
7909 return false;
7911 tmpbase = ((enum built_in_function)
7912 ((need_new ? newbase : oldbase) + index + 1));
7913 decl = builtin_decl_explicit (tmpbase);
7914 if (decl == NULL_TREE)
7915 return false;
7916 itype = TREE_TYPE (TREE_TYPE (decl));
7917 imode = TYPE_MODE (itype);
7919 /* We could test all of the various optabs involved, but the fact of the
7920 matter is that (with the exception of i486 vs i586 and xadd) all targets
7921 that support any atomic operation optab also implement compare-and-swap.
7922 Let optabs.c take care of expanding any compare-and-swap loop. */
7923 if (!can_compare_and_swap_p (imode, true))
7924 return false;
7926 gsi = gsi_last_bb (load_bb);
7927 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7929 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7930 It only requires that the operation happen atomically. Thus we can
7931 use the RELAXED memory model. */
7932 call = build_call_expr_loc (loc, decl, 3, addr,
7933 fold_convert_loc (loc, itype, rhs),
7934 build_int_cst (NULL,
7935 seq_cst ? MEMMODEL_SEQ_CST
7936 : MEMMODEL_RELAXED));
7938 if (need_old || need_new)
7940 lhs = need_old ? loaded_val : stored_val;
7941 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7942 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7944 else
7945 call = fold_convert_loc (loc, void_type_node, call);
7946 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7947 gsi_remove (&gsi, true);
7949 gsi = gsi_last_bb (store_bb);
7950 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7951 gsi_remove (&gsi, true);
7952 gsi = gsi_last_bb (store_bb);
7953 gsi_remove (&gsi, true);
7955 if (gimple_in_ssa_p (cfun))
7956 update_ssa (TODO_update_ssa_no_phi);
7958 return true;
7961 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7963 oldval = *addr;
7964 repeat:
7965 newval = rhs; // with oldval replacing *addr in rhs
7966 cur = __sync_val_compare_and_swap (addr, oldval, newval);
7967 if (cur != oldval)
7968 { oldval = cur; goto repeat; }
7970 INDEX is log2 of the size of the data type, and thus usable to find the
7971 index of the builtin decl. */
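/* A hand-written C analogue of the loop built below (illustrative only;
   the variable names are assumptions):

     int expected = x;                 // initial load, done in LOAD_BB
     for (;;)
       {
         int desired = expected * 3;   // RHS recomputed with the old value
         int prev = __sync_val_compare_and_swap (&x, expected, desired);
         if (prev == expected)
           break;                      // swap succeeded
         expected = prev;              // lost a race; retry
       }

   Non-integral types are first view-converted to a same-sized integer,
   as set up in the code below.  */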
7973 static bool
7974 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7975 tree addr, tree loaded_val, tree stored_val,
7976 int index)
7978 tree loadedi, storedi, initial, new_storedi, old_vali;
7979 tree type, itype, cmpxchg, iaddr;
7980 gimple_stmt_iterator si;
7981 basic_block loop_header = single_succ (load_bb);
7982 gimple phi, stmt;
7983 edge e;
7984 enum built_in_function fncode;
7986 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7987 order to use the RELAXED memory model effectively. */
7988 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7989 + index + 1);
7990 cmpxchg = builtin_decl_explicit (fncode);
7991 if (cmpxchg == NULL_TREE)
7992 return false;
7993 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7994 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7996 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7997 return false;
7999 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
8000 si = gsi_last_bb (load_bb);
8001 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8003 /* For floating-point values, we'll need to view-convert them to integers
8004 so that we can perform the atomic compare and swap. Simplify the
8005 following code by always setting up the "i"ntegral variables. */
8006 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
8008 tree iaddr_val;
8010 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
8011 true));
8012 iaddr_val
8013 = force_gimple_operand_gsi (&si,
8014 fold_convert (TREE_TYPE (iaddr), addr),
8015 false, NULL_TREE, true, GSI_SAME_STMT);
8016 stmt = gimple_build_assign (iaddr, iaddr_val);
8017 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8018 loadedi = create_tmp_var (itype);
8019 if (gimple_in_ssa_p (cfun))
8020 loadedi = make_ssa_name (loadedi);
8022 else
8024 iaddr = addr;
8025 loadedi = loaded_val;
8028 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
8029 tree loaddecl = builtin_decl_explicit (fncode);
8030 if (loaddecl)
8031 initial
8032 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
8033 build_call_expr (loaddecl, 2, iaddr,
8034 build_int_cst (NULL_TREE,
8035 MEMMODEL_RELAXED)));
8036 else
8037 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
8038 build_int_cst (TREE_TYPE (iaddr), 0));
8040 initial
8041 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
8042 GSI_SAME_STMT);
8044 /* Move the value to the LOADEDI temporary. */
8045 if (gimple_in_ssa_p (cfun))
8047 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
8048 phi = create_phi_node (loadedi, loop_header);
8049 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
8050 initial);
8052 else
8053 gsi_insert_before (&si,
8054 gimple_build_assign (loadedi, initial),
8055 GSI_SAME_STMT);
8056 if (loadedi != loaded_val)
8058 gimple_stmt_iterator gsi2;
8059 tree x;
8061 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
8062 gsi2 = gsi_start_bb (loop_header);
8063 if (gimple_in_ssa_p (cfun))
8065 gassign *stmt;
8066 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8067 true, GSI_SAME_STMT);
8068 stmt = gimple_build_assign (loaded_val, x);
8069 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
8071 else
8073 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
8074 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8075 true, GSI_SAME_STMT);
8078 gsi_remove (&si, true);
8080 si = gsi_last_bb (store_bb);
8081 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8083 if (iaddr == addr)
8084 storedi = stored_val;
8085 else
8086 storedi =
8087 force_gimple_operand_gsi (&si,
8088 build1 (VIEW_CONVERT_EXPR, itype,
8089 stored_val), true, NULL_TREE, true,
8090 GSI_SAME_STMT);
8092 /* Build the compare&swap statement. */
8093 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
8094 new_storedi = force_gimple_operand_gsi (&si,
8095 fold_convert (TREE_TYPE (loadedi),
8096 new_storedi),
8097 true, NULL_TREE,
8098 true, GSI_SAME_STMT);
8100 if (gimple_in_ssa_p (cfun))
8101 old_vali = loadedi;
8102 else
8104 old_vali = create_tmp_var (TREE_TYPE (loadedi));
8105 stmt = gimple_build_assign (old_vali, loadedi);
8106 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8108 stmt = gimple_build_assign (loadedi, new_storedi);
8109 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8112 /* Note that we always perform the comparison as an integer, even for
8113 floating point. This allows the atomic operation to properly
8114 succeed even with NaNs and -0.0. */
8115 stmt = gimple_build_cond_empty
8116 (build2 (NE_EXPR, boolean_type_node,
8117 new_storedi, old_vali));
8118 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8120 /* Update cfg. */
8121 e = single_succ_edge (store_bb);
8122 e->flags &= ~EDGE_FALLTHRU;
8123 e->flags |= EDGE_FALSE_VALUE;
8125 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
8127 /* Copy the new value to loadedi (we already did that before the condition
8128 if we are not in SSA). */
8129 if (gimple_in_ssa_p (cfun))
8131 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
8132 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
8135 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8136 gsi_remove (&si, true);
8138 struct loop *loop = alloc_loop ();
8139 loop->header = loop_header;
8140 loop->latch = store_bb;
8141 add_loop (loop, loop_header->loop_father);
8143 if (gimple_in_ssa_p (cfun))
8144 update_ssa (TODO_update_ssa_no_phi);
8146 return true;
8149 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8151 GOMP_atomic_start ();
8152 *addr = rhs;
8153 GOMP_atomic_end ();
8155 The result is not globally atomic, but works so long as all parallel
8156 references are within #pragma omp atomic directives. According to
8157 responses received from omp@openmp.org, this appears to be within
8158 spec, which makes sense, since that's how several other compilers
8159 handle this situation as well.
8160 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8161 expanding. STORED_VAL is the operand of the matching
8162 GIMPLE_OMP_ATOMIC_STORE.
8164 We replace
8165 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8166 loaded_val = *addr;
8168 and replace
8169 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8170 *addr = stored_val; */
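/* For instance (illustrative), when no compare-and-swap pattern is
   available, an update such as

     #pragma omp atomic
     x += 1;

   degrades into

     GOMP_atomic_start ();
     x = x + 1;
     GOMP_atomic_end ();

   serialising all such updates through one global lock in libgomp.  */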
8173 static bool
8174 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
8175 tree addr, tree loaded_val, tree stored_val)
8177 gimple_stmt_iterator si;
8178 gassign *stmt;
8179 tree t;
8181 si = gsi_last_bb (load_bb);
8182 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8184 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
8185 t = build_call_expr (t, 0);
8186 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8188 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
8189 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8190 gsi_remove (&si, true);
8192 si = gsi_last_bb (store_bb);
8193 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8195 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
8196 stored_val);
8197 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8199 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
8200 t = build_call_expr (t, 0);
8201 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8202 gsi_remove (&si, true);
8204 if (gimple_in_ssa_p (cfun))
8205 update_ssa (TODO_update_ssa_no_phi);
8206 return true;
8209 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
8210 using expand_omp_atomic_fetch_op. If that fails, we try
8211 expand_omp_atomic_pipeline, and if that fails too, the
8212 ultimate fallback is wrapping the operation in a mutex
8213 (expand_omp_atomic_mutex). REGION is the atomic region built
8214 by build_omp_regions_1(). */
8216 static void
8217 expand_omp_atomic (struct omp_region *region)
8219 basic_block load_bb = region->entry, store_bb = region->exit;
8220 gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
8221 gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
8222 tree loaded_val = gimple_omp_atomic_load_lhs (load);
8223 tree addr = gimple_omp_atomic_load_rhs (load);
8224 tree stored_val = gimple_omp_atomic_store_val (store);
8225 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8226 HOST_WIDE_INT index;
8228 /* Make sure the type is one of the supported sizes. */
8229 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
8230 index = exact_log2 (index);
8231 if (index >= 0 && index <= 4)
8233 unsigned int align = TYPE_ALIGN_UNIT (type);
8235 /* __sync builtins require strict data alignment. */
8236 if (exact_log2 (align) >= index)
8238 /* Atomic load. */
8239 if (loaded_val == stored_val
8240 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8241 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8242 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8243 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
8244 return;
8246 /* Atomic store. */
8247 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8248 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8249 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8250 && store_bb == single_succ (load_bb)
8251 && first_stmt (store_bb) == store
8252 && expand_omp_atomic_store (load_bb, addr, loaded_val,
8253 stored_val, index))
8254 return;
8256 /* When possible, use specialized atomic update functions. */
8257 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
8258 && store_bb == single_succ (load_bb)
8259 && expand_omp_atomic_fetch_op (load_bb, addr,
8260 loaded_val, stored_val, index))
8261 return;
8263 /* If we don't have specialized __sync builtins, try to implement
8264 it as a compare-and-swap loop. */
8265 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
8266 loaded_val, stored_val, index))
8267 return;
8271 /* The ultimate fallback is wrapping the operation in a mutex. */
8272 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
8276 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
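/* The launch emitted at the end of this function has roughly this shape
   (the argument names here are illustrative, not the runtime's documented
   ones):

     GOMP_target (device, child_fn, __OPENMP_TARGET__,
                  num_maps, hostaddrs, sizes, kinds);

   for a target region, while target data and target update drop the
   child function pointer and call GOMP_target_data / GOMP_target_update
   with six arguments.  */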
8278 static void
8279 expand_omp_target (struct omp_region *region)
8281 basic_block entry_bb, exit_bb, new_bb;
8282 struct function *child_cfun = NULL;
8283 tree child_fn = NULL_TREE, block, t;
8284 gimple_stmt_iterator gsi;
8285 gomp_target *entry_stmt;
8286 gimple stmt;
8287 edge e;
8289 entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
8290 new_bb = region->entry;
8291 int kind = gimple_omp_target_kind (entry_stmt);
8292 if (kind == GF_OMP_TARGET_KIND_REGION)
8294 child_fn = gimple_omp_target_child_fn (entry_stmt);
8295 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
8298 entry_bb = region->entry;
8299 exit_bb = region->exit;
8301 if (kind == GF_OMP_TARGET_KIND_REGION)
8303 unsigned srcidx, dstidx, num;
8305 /* If the target region needs data sent from the parent
8306 function, then the very first statement (except possible
8307 tree profile counter updates) of the parallel body
8308 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8309 &.OMP_DATA_O is passed as an argument to the child function,
8310 we need to replace it with the argument as seen by the child
8311 function.
8313 In most cases, this will end up being the identity assignment
8314 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
8315 a function call that has been inlined, the original PARM_DECL
8316 .OMP_DATA_I may have been converted into a different local
8317 variable. In which case, we need to keep the assignment. */
8318 if (gimple_omp_target_data_arg (entry_stmt))
8320 basic_block entry_succ_bb = single_succ (entry_bb);
8321 gimple_stmt_iterator gsi;
8322 tree arg;
8323 gimple tgtcopy_stmt = NULL;
8324 tree sender
8325 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
8327 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
8329 gcc_assert (!gsi_end_p (gsi));
8330 stmt = gsi_stmt (gsi);
8331 if (gimple_code (stmt) != GIMPLE_ASSIGN)
8332 continue;
8334 if (gimple_num_ops (stmt) == 2)
8336 tree arg = gimple_assign_rhs1 (stmt);
8338 /* We're ignoring the subcode because we're
8339 effectively doing a STRIP_NOPS. */
8341 if (TREE_CODE (arg) == ADDR_EXPR
8342 && TREE_OPERAND (arg, 0) == sender)
8344 tgtcopy_stmt = stmt;
8345 break;
8350 gcc_assert (tgtcopy_stmt != NULL);
8351 arg = DECL_ARGUMENTS (child_fn);
8353 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
8354 gsi_remove (&gsi, true);
8357 /* Declare local variables needed in CHILD_CFUN. */
8358 block = DECL_INITIAL (child_fn);
8359 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
8360 /* The gimplifier could record temporaries in target block
8361 rather than in containing function's local_decls chain,
8362 which would mean cgraph missed finalizing them. Do it now. */
8363 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
8364 if (TREE_CODE (t) == VAR_DECL
8365 && TREE_STATIC (t)
8366 && !DECL_EXTERNAL (t))
8367 varpool_node::finalize_decl (t);
8368 DECL_SAVED_TREE (child_fn) = NULL;
8369 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8370 gimple_set_body (child_fn, NULL);
8371 TREE_USED (block) = 1;
8373 /* Reset DECL_CONTEXT on function arguments. */
8374 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
8375 DECL_CONTEXT (t) = child_fn;
8377 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
8378 so that it can be moved to the child function. */
8379 gsi = gsi_last_bb (entry_bb);
8380 stmt = gsi_stmt (gsi);
8381 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
8382 && gimple_omp_target_kind (stmt)
8383 == GF_OMP_TARGET_KIND_REGION);
8384 gsi_remove (&gsi, true);
8385 e = split_block (entry_bb, stmt);
8386 entry_bb = e->dest;
8387 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8389 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8390 if (exit_bb)
8392 gsi = gsi_last_bb (exit_bb);
8393 gcc_assert (!gsi_end_p (gsi)
8394 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
8395 stmt = gimple_build_return (NULL);
8396 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8397 gsi_remove (&gsi, true);
8400 /* Move the target region into CHILD_CFUN. */
8402 block = gimple_block (entry_stmt);
8404 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8405 if (exit_bb)
8406 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8407 /* When the OMP expansion process cannot guarantee an up-to-date
8408 loop tree, arrange for the child function to fix up its loops. */
8409 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8410 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8412 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8413 num = vec_safe_length (child_cfun->local_decls);
8414 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8416 t = (*child_cfun->local_decls)[srcidx];
8417 if (DECL_CONTEXT (t) == cfun->decl)
8418 continue;
8419 if (srcidx != dstidx)
8420 (*child_cfun->local_decls)[dstidx] = t;
8421 dstidx++;
8423 if (dstidx != num)
8424 vec_safe_truncate (child_cfun->local_decls, dstidx);
8426 /* Inform the callgraph about the new function. */
8427 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8428 cgraph_node::add_new_function (child_fn, true);
8430 #ifdef ENABLE_OFFLOADING
8431 /* Add the new function to the offload table. */
8432 vec_safe_push (offload_funcs, child_fn);
8433 #endif
8435 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8436 fixed in a following pass. */
8437 push_cfun (child_cfun);
8438 cgraph_edge::rebuild_edges ();
8440 #ifdef ENABLE_OFFLOADING
8441 /* Prevent IPA from removing child_fn as unreachable, since there are no
8442 refs from the parent function to child_fn in offload LTO mode. */
8443 struct cgraph_node *node = cgraph_node::get (child_fn);
8444 node->mark_force_output ();
8445 #endif
8447 /* Some EH regions might become dead, see PR34608. If
8448 pass_cleanup_cfg isn't the first pass to happen with the
8449 new child, these dead EH edges might cause problems.
8450 Clean them up now. */
8451 if (flag_exceptions)
8453 basic_block bb;
8454 bool changed = false;
8456 FOR_EACH_BB_FN (bb, cfun)
8457 changed |= gimple_purge_dead_eh_edges (bb);
8458 if (changed)
8459 cleanup_tree_cfg ();
8461 pop_cfun ();
8464 /* Emit a library call to launch the target region, or do data
8465 transfers. */
8466 tree t1, t2, t3, t4, device, cond, c, clauses;
8467 enum built_in_function start_ix;
8468 location_t clause_loc;
8470 clauses = gimple_omp_target_clauses (entry_stmt);
8472 if (kind == GF_OMP_TARGET_KIND_REGION)
8473 start_ix = BUILT_IN_GOMP_TARGET;
8474 else if (kind == GF_OMP_TARGET_KIND_DATA)
8475 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8476 else
8477 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8479 /* By default, the value of DEVICE is -1 (let the runtime library
8480 choose) and there is no conditional. */
8481 cond = NULL_TREE;
8482 device = build_int_cst (integer_type_node, -1);
8484 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8485 if (c)
8486 cond = OMP_CLAUSE_IF_EXPR (c);
8488 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8489 if (c)
8491 device = OMP_CLAUSE_DEVICE_ID (c);
8492 clause_loc = OMP_CLAUSE_LOCATION (c);
8494 else
8495 clause_loc = gimple_location (entry_stmt);
8497 /* Ensure 'device' is of the correct type. */
8498 device = fold_convert_loc (clause_loc, integer_type_node, device);
8500 /* If we found the clause 'if (cond)', build
8501 (cond ? device : -2). */
8502 if (cond)
8504 cond = gimple_boolify (cond);
8506 basic_block cond_bb, then_bb, else_bb;
8507 edge e;
8508 tree tmp_var;
8510 tmp_var = create_tmp_var (TREE_TYPE (device));
8511 if (kind != GF_OMP_TARGET_KIND_REGION)
8513 gsi = gsi_last_bb (new_bb);
8514 gsi_prev (&gsi);
8515 e = split_block (new_bb, gsi_stmt (gsi));
8517 else
8518 e = split_block (new_bb, NULL);
8519 cond_bb = e->src;
8520 new_bb = e->dest;
8521 remove_edge (e);
8523 then_bb = create_empty_bb (cond_bb);
8524 else_bb = create_empty_bb (then_bb);
8525 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8526 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8528 stmt = gimple_build_cond_empty (cond);
8529 gsi = gsi_last_bb (cond_bb);
8530 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8532 gsi = gsi_start_bb (then_bb);
8533 stmt = gimple_build_assign (tmp_var, device);
8534 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8536 gsi = gsi_start_bb (else_bb);
8537 stmt = gimple_build_assign (tmp_var,
8538 build_int_cst (integer_type_node, -2));
8539 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8541 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8542 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8543 add_bb_to_loop (then_bb, cond_bb->loop_father);
8544 add_bb_to_loop (else_bb, cond_bb->loop_father);
8545 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8546 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8548 device = tmp_var;
8551 gsi = gsi_last_bb (new_bb);
8552 t = gimple_omp_target_data_arg (entry_stmt);
8553 if (t == NULL)
8555 t1 = size_zero_node;
8556 t2 = build_zero_cst (ptr_type_node);
8557 t3 = t2;
8558 t4 = t2;
8560 else
8562 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8563 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8564 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8565 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8566 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8569 gimple g;
8570 /* FIXME: This will be the address of the
8571 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8572 symbol, as soon as the linker plugin is able to create it for us. */
8573 tree openmp_target = build_zero_cst (ptr_type_node);
8574 if (kind == GF_OMP_TARGET_KIND_REGION)
8576 tree fnaddr = build_fold_addr_expr (child_fn);
8577 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8578 device, fnaddr, openmp_target, t1, t2, t3, t4);
8580 else
8581 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8582 device, openmp_target, t1, t2, t3, t4);
8583 gimple_set_location (g, gimple_location (entry_stmt));
8584 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8585 if (kind != GF_OMP_TARGET_KIND_REGION)
8587 g = gsi_stmt (gsi);
8588 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8589 gsi_remove (&gsi, true);
8591 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8593 gsi = gsi_last_bb (region->exit);
8594 g = gsi_stmt (gsi);
8595 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8596 gsi_remove (&gsi, true);
8601 /* Expand the parallel region tree rooted at REGION. Expansion
8602 proceeds in depth-first order. Innermost regions are expanded
8603 first. This way, parallel regions that require a new function to
8604 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8605 internal dependencies in their body. */
8607 static void
8608 expand_omp (struct omp_region *region)
8610 while (region)
8612 location_t saved_location;
8613 gimple inner_stmt = NULL;
8615 /* First, determine whether this is a combined parallel+workshare
8616 region. */
8617 if (region->type == GIMPLE_OMP_PARALLEL)
8618 determine_parallel_type (region);
8620 if (region->type == GIMPLE_OMP_FOR
8621 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8622 inner_stmt = last_stmt (region->inner->entry);
8624 if (region->inner)
8625 expand_omp (region->inner);
8627 saved_location = input_location;
8628 if (gimple_has_location (last_stmt (region->entry)))
8629 input_location = gimple_location (last_stmt (region->entry));
8631 switch (region->type)
8633 case GIMPLE_OMP_PARALLEL:
8634 case GIMPLE_OMP_TASK:
8635 expand_omp_taskreg (region);
8636 break;
8638 case GIMPLE_OMP_FOR:
8639 expand_omp_for (region, inner_stmt);
8640 break;
8642 case GIMPLE_OMP_SECTIONS:
8643 expand_omp_sections (region);
8644 break;
8646 case GIMPLE_OMP_SECTION:
8647 /* Individual omp sections are handled together with their
8648 parent GIMPLE_OMP_SECTIONS region. */
8649 break;
8651 case GIMPLE_OMP_SINGLE:
8652 expand_omp_single (region);
8653 break;
8655 case GIMPLE_OMP_MASTER:
8656 case GIMPLE_OMP_TASKGROUP:
8657 case GIMPLE_OMP_ORDERED:
8658 case GIMPLE_OMP_CRITICAL:
8659 case GIMPLE_OMP_TEAMS:
8660 expand_omp_synch (region);
8661 break;
8663 case GIMPLE_OMP_ATOMIC_LOAD:
8664 expand_omp_atomic (region);
8665 break;
8667 case GIMPLE_OMP_TARGET:
8668 expand_omp_target (region);
8669 break;
8671 default:
8672 gcc_unreachable ();
8675 input_location = saved_location;
8676 region = region->next;
8681 /* Helper for build_omp_regions. Scan the dominator tree starting at
8682 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8683 true, the function ends once a single tree is built (otherwise, a
8684 whole forest of OMP constructs may be built). */
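/* For instance (illustrative source):

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++) ...
     }

   yields a GIMPLE_OMP_PARALLEL region whose region->inner is the
   GIMPLE_OMP_FOR region; each region records its entry block, its exit
   block (the GIMPLE_OMP_RETURN), and, for loops, the continue block.  */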
8686 static void
8687 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8688 bool single_tree)
8690 gimple_stmt_iterator gsi;
8691 gimple stmt;
8692 basic_block son;
8694 gsi = gsi_last_bb (bb);
8695 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8697 struct omp_region *region;
8698 enum gimple_code code;
8700 stmt = gsi_stmt (gsi);
8701 code = gimple_code (stmt);
8702 if (code == GIMPLE_OMP_RETURN)
8704 /* STMT is the return point out of region PARENT. Mark it
8705 as the exit point and make PARENT the immediately
8706 enclosing region. */
8707 gcc_assert (parent);
8708 region = parent;
8709 region->exit = bb;
8710 parent = parent->outer;
8712 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8714 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8715 GIMPLE_OMP_RETURN, but matches with
8716 GIMPLE_OMP_ATOMIC_LOAD. */
8717 gcc_assert (parent);
8718 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8719 region = parent;
8720 region->exit = bb;
8721 parent = parent->outer;
8724 else if (code == GIMPLE_OMP_CONTINUE)
8726 gcc_assert (parent);
8727 parent->cont = bb;
8729 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8731 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8732 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8735 else if (code == GIMPLE_OMP_TARGET
8736 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8737 new_omp_region (bb, code, parent);
8738 else
8740 /* Otherwise, this directive becomes the parent for a new
8741 region. */
8742 region = new_omp_region (bb, code, parent);
8743 parent = region;
8747 if (single_tree && !parent)
8748 return;
8750 for (son = first_dom_son (CDI_DOMINATORS, bb);
8751 son;
8752 son = next_dom_son (CDI_DOMINATORS, son))
8753 build_omp_regions_1 (son, parent, single_tree);
8756 /* Builds the tree of OMP regions rooted at ROOT, storing it to
8757 root_omp_region. */
8759 static void
8760 build_omp_regions_root (basic_block root)
8762 gcc_assert (root_omp_region == NULL);
8763 build_omp_regions_1 (root, NULL, true);
8764 gcc_assert (root_omp_region != NULL);
8767 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
8769 void
8770 omp_expand_local (basic_block head)
8772 build_omp_regions_root (head);
8773 if (dump_file && (dump_flags & TDF_DETAILS))
8775 fprintf (dump_file, "\nOMP region tree\n\n");
8776 dump_omp_region (dump_file, root_omp_region, 0);
8777 fprintf (dump_file, "\n");
8780 remove_exit_barriers (root_omp_region);
8781 expand_omp (root_omp_region);
8783 free_omp_regions ();
8786 /* Scan the CFG and build a tree of OMP regions, storing the root
8787 in root_omp_region. */
8789 static void
8790 build_omp_regions (void)
8792 gcc_assert (root_omp_region == NULL);
8793 calculate_dominance_info (CDI_DOMINATORS);
8794 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8797 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8799 static unsigned int
8800 execute_expand_omp (void)
8802 build_omp_regions ();
8804 if (!root_omp_region)
8805 return 0;
8807 if (dump_file)
8809 fprintf (dump_file, "\nOMP region tree\n\n");
8810 dump_omp_region (dump_file, root_omp_region, 0);
8811 fprintf (dump_file, "\n");
8814 remove_exit_barriers (root_omp_region);
8816 expand_omp (root_omp_region);
8818 cleanup_tree_cfg ();
8820 free_omp_regions ();
8822 return 0;
8825 /* OMP expansion -- the default pass, run before creation of SSA form. */
8827 namespace {
8829 const pass_data pass_data_expand_omp =
8831 GIMPLE_PASS, /* type */
8832 "ompexp", /* name */
8833 OPTGROUP_NONE, /* optinfo_flags */
8834 TV_NONE, /* tv_id */
8835 PROP_gimple_any, /* properties_required */
8836 PROP_gimple_eomp, /* properties_provided */
8837 0, /* properties_destroyed */
8838 0, /* todo_flags_start */
8839 0, /* todo_flags_finish */
8842 class pass_expand_omp : public gimple_opt_pass
8844 public:
8845 pass_expand_omp (gcc::context *ctxt)
8846 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8849 /* opt_pass methods: */
8850 virtual unsigned int execute (function *)
8852 bool gate = ((flag_openmp != 0 || flag_openmp_simd != 0
8853 || flag_cilkplus != 0) && !seen_error ());
8855 /* This pass always runs, to provide PROP_gimple_eomp.
8856 But there is nothing to do unless -fopenmp is given. */
8857 if (!gate)
8858 return 0;
8860 return execute_expand_omp ();
8863 }; // class pass_expand_omp
8865 } // anon namespace
8867 gimple_opt_pass *
8868 make_pass_expand_omp (gcc::context *ctxt)
8870 return new pass_expand_omp (ctxt);
8873 namespace {
8875 const pass_data pass_data_expand_omp_ssa =
8877 GIMPLE_PASS, /* type */
8878 "ompexpssa", /* name */
8879 OPTGROUP_NONE, /* optinfo_flags */
8880 TV_NONE, /* tv_id */
8881 PROP_cfg | PROP_ssa, /* properties_required */
8882 PROP_gimple_eomp, /* properties_provided */
8883 0, /* properties_destroyed */
8884 0, /* todo_flags_start */
8885 TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
8888 class pass_expand_omp_ssa : public gimple_opt_pass
8890 public:
8891 pass_expand_omp_ssa (gcc::context *ctxt)
8892 : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
8895 /* opt_pass methods: */
8896 virtual bool gate (function *fun)
8898 return !(fun->curr_properties & PROP_gimple_eomp);
8900 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8902 }; // class pass_expand_omp_ssa
8904 } // anon namespace
8906 gimple_opt_pass *
8907 make_pass_expand_omp_ssa (gcc::context *ctxt)
8909 return new pass_expand_omp_ssa (ctxt);
8912 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8914 /* If ctx is a worksharing context inside a cancellable parallel
8915 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
8916 and conditional branch to parallel's cancel_label to handle
8917 cancellation in the implicit barrier. */
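/* Schematically (an illustration of the statements appended below), the
   implicit barrier of a cancellable worksharing body ends up as

     lhs = GOMP_barrier_cancel ();   // via the GIMPLE_OMP_RETURN lhs
     if (lhs != 0) goto cancel_label;
     fallthru_label:;

   so threads that observe cancellation branch to the enclosing
   parallel's cancellation handling code.  */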
8919 static void
8920 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8922 gimple omp_return = gimple_seq_last_stmt (*body);
8923 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8924 if (gimple_omp_return_nowait_p (omp_return))
8925 return;
8926 if (ctx->outer
8927 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8928 && ctx->outer->cancellable)
8930 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8931 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8932 tree lhs = create_tmp_var (c_bool_type);
8933 gimple_omp_return_set_lhs (omp_return, lhs);
8934 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8935 gimple g = gimple_build_cond (NE_EXPR, lhs,
8936 fold_convert (c_bool_type,
8937 boolean_false_node),
8938 ctx->outer->cancel_label, fallthru_label);
8939 gimple_seq_add_stmt (body, g);
8940 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8944 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8945 CTX is the enclosing OMP context for the current statement. */
8947 static void
8948 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8950 tree block, control;
8951 gimple_stmt_iterator tgsi;
8952 gomp_sections *stmt;
8953 gimple t;
8954 gbind *new_stmt, *bind;
8955 gimple_seq ilist, dlist, olist, new_body;
8957 stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));
8959 push_gimplify_context ();
8961 dlist = NULL;
8962 ilist = NULL;
8963 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8964 &ilist, &dlist, ctx, NULL);
8966 new_body = gimple_omp_body (stmt);
8967 gimple_omp_set_body (stmt, NULL);
8968 tgsi = gsi_start (new_body);
8969 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8971 omp_context *sctx;
8972 gimple sec_start;
8974 sec_start = gsi_stmt (tgsi);
8975 sctx = maybe_lookup_ctx (sec_start);
8976 gcc_assert (sctx);
8978 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8979 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8980 GSI_CONTINUE_LINKING);
8981 gimple_omp_set_body (sec_start, NULL);
8983 if (gsi_one_before_end_p (tgsi))
8985 gimple_seq l = NULL;
8986 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8987 &l, ctx);
8988 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8989 gimple_omp_section_set_last (sec_start);
8992 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8993 GSI_CONTINUE_LINKING);
8996 block = make_node (BLOCK);
8997 bind = gimple_build_bind (NULL, new_body, block);
8999 olist = NULL;
9000 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
9002 block = make_node (BLOCK);
9003 new_stmt = gimple_build_bind (NULL, NULL, block);
9004 gsi_replace (gsi_p, new_stmt, true);
9006 pop_gimplify_context (new_stmt);
9007 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9008 BLOCK_VARS (block) = gimple_bind_vars (bind);
9009 if (BLOCK_VARS (block))
9010 TREE_USED (block) = 1;
9012 new_body = NULL;
9013 gimple_seq_add_seq (&new_body, ilist);
9014 gimple_seq_add_stmt (&new_body, stmt);
9015 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
9016 gimple_seq_add_stmt (&new_body, bind);
9018 control = create_tmp_var (unsigned_type_node, ".section");
9019 t = gimple_build_omp_continue (control, control);
9020 gimple_omp_sections_set_control (stmt, control);
9021 gimple_seq_add_stmt (&new_body, t);
9023 gimple_seq_add_seq (&new_body, olist);
9024 if (ctx->cancellable)
9025 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
9026 gimple_seq_add_seq (&new_body, dlist);
9028 new_body = maybe_catch_exception (new_body);
9030 t = gimple_build_omp_return
9031 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
9032 OMP_CLAUSE_NOWAIT));
9033 gimple_seq_add_stmt (&new_body, t);
9034 maybe_add_implicit_barrier_cancel (ctx, &new_body);
9036 gimple_bind_set_body (new_stmt, new_body);
9040 /* A subroutine of lower_omp_single. Expand the simple form of
9041 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
9043 if (GOMP_single_start ())
9044 BODY;
9045 [ GOMP_barrier (); ] -> unless 'nowait' is present.
9047 FIXME. It may be better to delay expanding the logic of this until
9048 pass_expand_omp. The expanded logic may make the job more difficult
9049 for a synchronization analysis pass. */
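/* For example (illustrative source), "#pragma omp single" around a call
   init () lowers to

     if (GOMP_single_start ())
       init ();
     GOMP_barrier ();   // omitted when nowait is present

   GOMP_single_start () returns true in exactly one of the encountering
   threads.  */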
9051 static void
9052 lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
9054 location_t loc = gimple_location (single_stmt);
9055 tree tlabel = create_artificial_label (loc);
9056 tree flabel = create_artificial_label (loc);
9057 gimple call, cond;
9058 tree lhs, decl;
9060 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
9061 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)));
9062 call = gimple_build_call (decl, 0);
9063 gimple_call_set_lhs (call, lhs);
9064 gimple_seq_add_stmt (pre_p, call);
9066 cond = gimple_build_cond (EQ_EXPR, lhs,
9067 fold_convert_loc (loc, TREE_TYPE (lhs),
9068 boolean_true_node),
9069 tlabel, flabel);
9070 gimple_seq_add_stmt (pre_p, cond);
9071 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
9072 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
9073 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
9077 /* A subroutine of lower_omp_single. Expand the simple form of
9078 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
9080 #pragma omp single copyprivate (a, b, c)
9082 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
9085 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
9087 BODY;
9088 copyout.a = a;
9089 copyout.b = b;
9090 copyout.c = c;
9091 GOMP_single_copy_end (&copyout);
9093 else
9095 a = copyout_p->a;
9096 b = copyout_p->b;
9097 c = copyout_p->c;
9099 GOMP_barrier ();
9102 FIXME. It may be better to delay expanding the logic of this until
9103 pass_expand_omp. The expanded logic may make the job more difficult
9104 for a synchronization analysis pass. */
9106 static void
9107 lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
9108 omp_context *ctx)
9110 tree ptr_type, t, l0, l1, l2, bfn_decl;
9111 gimple_seq copyin_seq;
9112 location_t loc = gimple_location (single_stmt);
9114 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
9116 ptr_type = build_pointer_type (ctx->record_type);
9117 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
9119 l0 = create_artificial_label (loc);
9120 l1 = create_artificial_label (loc);
9121 l2 = create_artificial_label (loc);
9123 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
9124 t = build_call_expr_loc (loc, bfn_decl, 0);
9125 t = fold_convert_loc (loc, ptr_type, t);
9126 gimplify_assign (ctx->receiver_decl, t, pre_p);
9128 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
9129 build_int_cst (ptr_type, 0));
9130 t = build3 (COND_EXPR, void_type_node, t,
9131 build_and_jump (&l0), build_and_jump (&l1));
9132 gimplify_and_add (t, pre_p);
9134 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
9136 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
9138 copyin_seq = NULL;
9139 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
9140 &copyin_seq, ctx);
9142 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9143 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
9144 t = build_call_expr_loc (loc, bfn_decl, 1, t);
9145 gimplify_and_add (t, pre_p);
9147 t = build_and_jump (&l2);
9148 gimplify_and_add (t, pre_p);
9150 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
9152 gimple_seq_add_seq (pre_p, copyin_seq);
9154 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
9158 /* Expand code for an OpenMP single directive. */
9160 static void
9161 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9163 tree block;
9164 gimple t;
9165 gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
9166 gbind *bind;
9167 gimple_seq bind_body, bind_body_tail = NULL, dlist;
9169 push_gimplify_context ();
9171 block = make_node (BLOCK);
9172 bind = gimple_build_bind (NULL, NULL, block);
9173 gsi_replace (gsi_p, bind, true);
9174 bind_body = NULL;
9175 dlist = NULL;
9176 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
9177 &bind_body, &dlist, ctx, NULL);
9178 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
9180 gimple_seq_add_stmt (&bind_body, single_stmt);
9182 if (ctx->record_type)
9183 lower_omp_single_copy (single_stmt, &bind_body, ctx);
9184 else
9185 lower_omp_single_simple (single_stmt, &bind_body);
9187 gimple_omp_set_body (single_stmt, NULL);
9189 gimple_seq_add_seq (&bind_body, dlist);
9191 bind_body = maybe_catch_exception (bind_body);
9193 t = gimple_build_omp_return
9194 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
9195 OMP_CLAUSE_NOWAIT));
9196 gimple_seq_add_stmt (&bind_body_tail, t);
9197 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
9198 if (ctx->record_type)
9200 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
9201 tree clobber = build_constructor (ctx->record_type, NULL);
9202 TREE_THIS_VOLATILE (clobber) = 1;
9203 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
9204 clobber), GSI_SAME_STMT);
9206 gimple_seq_add_seq (&bind_body, bind_body_tail);
9207 gimple_bind_set_body (bind, bind_body);
9209 pop_gimplify_context (bind);
9211 gimple_bind_append_vars (bind, ctx->block_vars);
9212 BLOCK_VARS (block) = ctx->block_vars;
9213 if (BLOCK_VARS (block))
9214 TREE_USED (block) = 1;
9218 /* Expand code for an OpenMP master directive. */
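/* Illustratively, the guard built below is equivalent to

     if (omp_get_thread_num () != 0)
       goto lab;
     ...body...
   lab:;

   so only the master thread runs the body, and no barrier is implied
   (the GIMPLE_OMP_RETURN is built with nowait set).  */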
9220 static void
9221 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9223 tree block, lab = NULL, x, bfn_decl;
9224 gimple stmt = gsi_stmt (*gsi_p);
9225 gbind *bind;
9226 location_t loc = gimple_location (stmt);
9227 gimple_seq tseq;
9229 push_gimplify_context ();
9231 block = make_node (BLOCK);
9232 bind = gimple_build_bind (NULL, NULL, block);
9233 gsi_replace (gsi_p, bind, true);
9234 gimple_bind_add_stmt (bind, stmt);
9236 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9237 x = build_call_expr_loc (loc, bfn_decl, 0);
9238 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
9239 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
9240 tseq = NULL;
9241 gimplify_and_add (x, &tseq);
9242 gimple_bind_add_seq (bind, tseq);
9244 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9245 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9246 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9247 gimple_omp_set_body (stmt, NULL);
9249 gimple_bind_add_stmt (bind, gimple_build_label (lab));
9251 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9253 pop_gimplify_context (bind);
9255 gimple_bind_append_vars (bind, ctx->block_vars);
9256 BLOCK_VARS (block) = ctx->block_vars;
9260 /* Expand code for an OpenMP taskgroup directive. */
9262 static void
9263 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9265 gimple stmt = gsi_stmt (*gsi_p);
9266 gcall *x;
9267 gbind *bind;
9268 tree block = make_node (BLOCK);
9270 bind = gimple_build_bind (NULL, NULL, block);
9271 gsi_replace (gsi_p, bind, true);
9272 gimple_bind_add_stmt (bind, stmt);
9274 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
9275 0);
9276 gimple_bind_add_stmt (bind, x);
9278 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9279 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9280 gimple_omp_set_body (stmt, NULL);
9282 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9284 gimple_bind_append_vars (bind, ctx->block_vars);
9285 BLOCK_VARS (block) = ctx->block_vars;
9289 /* Expand code for an OpenMP ordered directive. */
9291 static void
9292 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9294 tree block;
9295 gimple stmt = gsi_stmt (*gsi_p);
9296 gcall *x;
9297 gbind *bind;
9299 push_gimplify_context ();
9301 block = make_node (BLOCK);
9302 bind = gimple_build_bind (NULL, NULL, block);
9303 gsi_replace (gsi_p, bind, true);
9304 gimple_bind_add_stmt (bind, stmt);
9306 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
9307 0);
9308 gimple_bind_add_stmt (bind, x);
9310 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9311 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9312 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9313 gimple_omp_set_body (stmt, NULL);
9315 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
9316 gimple_bind_add_stmt (bind, x);
9318 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9320 pop_gimplify_context (bind);
9322 gimple_bind_append_vars (bind, ctx->block_vars);
9323 BLOCK_VARS (block) = gimple_bind_vars (bind);
9327 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
9328 substitution of a couple of function calls. But in the NAMED case,
9329 requires that languages coordinate a symbol name. It is therefore
9330 best put here in common code. */
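
/* Illustrative sketch (pseudo-code) of the two forms handled below:

	#pragma omp critical			#pragma omp critical (foo)
	  body;					  body;

   become roughly

	GOMP_critical_start ();			GOMP_critical_name_start (&m);
	body;					body;
	GOMP_critical_end ();			GOMP_critical_name_end (&m);

   where m stands for the common symbol .gomp_critical_user_foo that the
   code below creates for the named case.  */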

static GTY(()) hash_map<tree, tree> *critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
  gbind *bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;

      if (!critical_name_mutexes)
        critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);

      tree *n = critical_name_mutexes->get (name);
      if (n == NULL)
        {
          char *new_str;

          decl = create_tmp_var_raw (ptr_type_node);

          new_str = ACONCAT ((".gomp_critical_user_",
                              IDENTIFIER_POINTER (name), NULL));
          DECL_NAME (decl) = get_identifier (new_str);
          TREE_PUBLIC (decl) = 1;
          TREE_STATIC (decl) = 1;
          DECL_COMMON (decl) = 1;
          DECL_ARTIFICIAL (decl) = 1;
          DECL_IGNORED_P (decl) = 1;

          varpool_node::finalize_decl (decl);

          critical_name_mutexes->put (name, decl);
        }
      else
        decl = *n;

      /* If '#pragma omp critical' is inside target region or
         inside function marked as offloadable, the symbol must be
         marked as offloadable too.  */
      omp_context *octx;
      if (cgraph_node::get (current_function_decl)->offloadable)
        varpool_node::get_create (decl)->offloadable = 1;
      else
        for (octx = ctx->outer; octx; octx = octx->outer)
          if (is_targetreg_ctx (octx))
            {
              varpool_node::get_create (decl)->offloadable = 1;
              break;
            }

      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
      lock = build_call_expr_loc (loc, lock, 1,
                                  build_fold_addr_expr_loc (loc, decl));

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
      unlock = build_call_expr_loc (loc, unlock, 1,
                                    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}


/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST; iterator initialization is appended to
   *BODY_P.  */
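
/* For instance (illustrative pseudo-code), given

	#pragma omp for lastprivate (x)
	for (V = N1; V < N2; V += STEP)

   the sequence added to *DLIST is roughly

	if (!(V < N2))
	  x = x_private;

   and *BODY_P receives 'V = N1;' (or 'V = 0;' when the EQ_EXPR special
   case below applies), so a thread that runs no iterations still sees a
   well-defined V in the guard.  */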

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
                           gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (tree_fits_shwi_p (fd->loop.step))
    {
      HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
      if (step == 1 || step == -1)
        cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
          && tree_fits_shwi_p (fd->loop.n2)
          && ! integer_zerop (fd->loop.n2))
        vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      else
        vinit = unshare_expr (vinit);

      /* Initialize the iterator variable, so that threads that don't execute
         any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}


/* Lower code for an OpenMP loop directive.  */
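
/* Illustrative sketch (pseudo-code) of the sequence assembled piecewise
   below; the exact contents depend on the clauses present:

	<input clauses / pre-body>
	<lowered VAL1/VAL2/VAL3 temporaries>
	<lastprivate iterator initialization>
	GIMPLE_OMP_FOR (V = VAL1; V cond VAL2; V = V [+-] VAL3)
	  body;
	GIMPLE_OMP_CONTINUE (V, V)
	<reduction clauses>
	<lastprivate / destructor list>
	GIMPLE_OMP_RETURN (nowait?)
*/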

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd, *fdp = NULL;
  gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
  gbind *new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;

  push_gimplify_context ();

  lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Replace at gsi right away, so that 'stmt' is no longer a member
     of a sequence, as we're going to add it to a different
     one below.  */
  gsi_replace (gsi_p, new_stmt, true);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      gbind *inner_bind
        = as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
      tree vars = gimple_bind_vars (inner_bind);
      gimple_bind_append_vars (new_stmt, vars);
      /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
         keep them on the inner_bind and its block.  */
      gimple_bind_set_vars (inner_bind, NULL_TREE);
      if (gimple_bind_block (inner_bind))
        BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
    }

  if (gimple_omp_for_combined_into_p (stmt))
    {
      extract_omp_for_data (stmt, &fd, NULL);
      fdp = &fd;

      /* We need two temporaries with fd.loop.v type (istart/iend)
         and then (fd.collapse - 1) temporaries with the same
         type for count2 ... countN-1 vars if not constant.  */
      size_t count = 2;
      tree type = fd.iter_type;
      if (fd.collapse > 1
          && TREE_CODE (fd.loop.n2) != INTEGER_CST)
        count += fd.collapse - 1;
      bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
      tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
      tree clauses = *pc;
      if (parallel_for)
        outerc
          = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
                             OMP_CLAUSE__LOOPTEMP_);
      for (i = 0; i < count; i++)
        {
          tree temp;
          if (parallel_for)
            {
              gcc_assert (outerc);
              temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
              outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
                                        OMP_CLAUSE__LOOPTEMP_);
            }
          else
            {
              temp = create_tmp_var (type);
              insert_decl_map (&ctx->outer->cb, temp, temp);
            }
          *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
          OMP_CLAUSE_DECL (*pc) = temp;
          pc = &OMP_CLAUSE_CHAIN (*pc);
        }
      *pc = clauses;
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
                           fdp);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  lower_omp (gimple_omp_body_ptr (stmt), ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

        #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
                                                         fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);

  if (ctx->cancellable)
    gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));

  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
  maybe_add_implicit_barrier_cancel (ctx, &body);
  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
}


/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
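
/* E.g. (illustrative) for

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   the walk sees exactly one workshare and nothing else, leaving *INFO at 1,
   so the enclosing parallel can be marked as a combined construct; any other
   statement in the body forces *INFO to -1.  */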

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
                         bool *handled_ops_p,
                         struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var));

  return var;
}

static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
                     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                 &tcctx->cb, NULL);
      new_fields = new_f;
      tcctx->cb.decl_map->put (f, new_f);
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}

/* Create task copyfn.  */
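
/* Illustrative sketch (pseudo-code; the record and field names are made up
   for the example).  ARG points at the task's own data block, SARG at the
   sender's block built by the parent:

	void copyfn (struct .omp_data_t *arg, struct .omp_data_s *sarg)
	{
	  arg->shared_var_ptr = sarg->shared_var_ptr;	(shared)
	  arg->fp_var = sarg->fp_var;	(firstprivate, or a copy ctor call)
	  ...				(VLA firstprivates handled last)
	}
*/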

static void
create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context ();
  push_cfun (child_cfun);

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        record_needs_remap = true;
        break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        srecord_needs_remap = true;
        break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = new hash_map<tree, tree>;
      tcctx.ctx = ctx;

      if (record_needs_remap)
        record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
        srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree *p;

          decl = OMP_CLAUSE_DECL (c);
          p = tcctx.cb.decl_map->get (decl);
          if (p == NULL)
            continue;
          n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
          sf = (tree) n->value;
          sf = *tcctx.cb.decl_map->get (sf);
          src = build_simple_mem_ref_loc (loc, sarg);
          src = omp_build_component_ref (src, sf);
          t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
          append_to_statement_list (t, &list);
        }

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *tcctx.cb.decl_map->get (f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        sf = (tree) n->value;
        if (tcctx.cb.decl_map)
          sf = *tcctx.cb.decl_map->get (sf);
        src = build_simple_mem_ref_loc (loc, sarg);
        src = omp_build_component_ref (src, sf);
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_FIRSTPRIVATE:
        decl = OMP_CLAUSE_DECL (c);
        if (is_variable_sized (decl))
          break;
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *tcctx.cb.decl_map->get (f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *tcctx.cb.decl_map->get (sf);
            src = build_simple_mem_ref_loc (loc, sarg);
            src = omp_build_component_ref (src, sf);
            if (use_pointer_for_field (decl, NULL) || is_reference (decl))
              src = build_simple_mem_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_PRIVATE:
        if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
          break;
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *tcctx.cb.decl_map->get (f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *tcctx.cb.decl_map->get (sf);
            src = build_simple_mem_ref_loc (loc, sarg);
            src = omp_build_component_ref (src, sf);
            if (use_pointer_for_field (decl, NULL))
              src = build_simple_mem_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = omp_build_component_ref (dst, f);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      default:
        break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree ind, ptr, df;

          decl = OMP_CLAUSE_DECL (c);
          if (!is_variable_sized (decl))
            continue;
          n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
          if (n == NULL)
            continue;
          f = (tree) n->value;
          f = *tcctx.cb.decl_map->get (f);
          gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
          ind = DECL_VALUE_EXPR (decl);
          gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
          gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
          n = splay_tree_lookup (ctx->sfield_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          sf = (tree) n->value;
          sf = *tcctx.cb.decl_map->get (sf);
          src = build_simple_mem_ref_loc (loc, sarg);
          src = omp_build_component_ref (src, sf);
          src = build_simple_mem_ref_loc (loc, src);
          dst = build_simple_mem_ref_loc (loc, arg);
          dst = omp_build_component_ref (dst, f);
          t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
          append_to_statement_list (t, &list);
          n = splay_tree_lookup (ctx->field_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          df = (tree) n->value;
          df = *tcctx.cb.decl_map->get (df);
          ptr = build_simple_mem_ref_loc (loc, arg);
          ptr = omp_build_component_ref (ptr, df);
          t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
                      build_fold_addr_expr_loc (loc, dst));
          append_to_statement_list (t, &list);
        }

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    delete tcctx.cb.decl_map;
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
}
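
/* Lower the depend clauses of task STMT into a runtime depend array;
   its initialization is appended to *ISEQ and the final clobber to *OSEQ.
   As built below, the layout is:

	array[0] = number of depend clauses (n_in + n_out)
	array[1] = number of out/inout clauses (n_out)
	array[2...] = addresses of the out/inout operands, then of the
		      in operands

   and a new OMP_CLAUSE_DEPEND pointing at ARRAY is prepended to the
   task's clauses for expansion to pass to the runtime.  */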

static void
lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
{
  tree c, clauses;
  gimple g;
  size_t n_in = 0, n_out = 0, idx = 2, i;

  clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
                             OMP_CLAUSE_DEPEND);
  gcc_assert (clauses);
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
      switch (OMP_CLAUSE_DEPEND_KIND (c))
        {
        case OMP_CLAUSE_DEPEND_IN:
          n_in++;
          break;
        case OMP_CLAUSE_DEPEND_OUT:
        case OMP_CLAUSE_DEPEND_INOUT:
          n_out++;
          break;
        default:
          gcc_unreachable ();
        }
  tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
  tree array = create_tmp_var (type);
  tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
                   NULL_TREE);
  g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
  gimple_seq_add_stmt (iseq, g);
  r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
              NULL_TREE);
  g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
  gimple_seq_add_stmt (iseq, g);
  for (i = 0; i < 2; i++)
    {
      if ((i ? n_in : n_out) == 0)
        continue;
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
        if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
            && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
          {
            tree t = OMP_CLAUSE_DECL (c);
            t = fold_convert (ptr_type_node, t);
            gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
            r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
                        NULL_TREE, NULL_TREE);
            g = gimple_build_assign (r, t);
            gimple_seq_add_stmt (iseq, g);
          }
    }
  tree *p = gimple_omp_task_clauses_ptr (stmt);
  c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
  OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
  OMP_CLAUSE_CHAIN (c) = *p;
  *p = c;
  tree clobber = build_constructor (type, NULL);
  TREE_THIS_VOLATILE (clobber) = 1;
  g = gimple_build_assign (array, clobber);
  gimple_seq_add_stmt (oseq, g);
}

/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */
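
/* Illustrative sketch (pseudo-code) of the result for a parallel; the
   record and field names follow the .omp_data_* convention this pass
   uses, but are only indicative:

	<send clauses: .omp_data_o.i = i; ...>
	GIMPLE_OMP_PARALLEL (child_fn, &.omp_data_o)
	  .omp_data_i = &.omp_data_o;
	  <copy-in / firstprivate setup>
	  body;
	  <reductions, copy-out>
	  GIMPLE_OMP_RETURN
	.omp_data_o = {CLOBBER};
*/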

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gbind *par_bind, *bind, *dep_bind = NULL;
  gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind
    = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
        gimple_omp_parallel_set_combined_p (stmt, true);
    }
  gimple_seq dep_ilist = NULL;
  gimple_seq dep_olist = NULL;
  if (gimple_code (stmt) == GIMPLE_OMP_TASK
      && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
    {
      push_gimplify_context ();
      dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
      lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
    }

  if (ctx->srecord_type)
    create_task_copyfn (as_a <gomp_task *> (stmt), ctx);

  push_gimplify_context ();

  par_olist = NULL;
  par_ilist = NULL;
  par_rlist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
  lower_omp (&par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_rlist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
        = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
                          : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  if (ctx->record_type)
    {
      tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
                                                        clobber));
    }

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
                           gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_rlist);
  if (ctx->cancellable)
    gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
  gimple_bind_add_seq (bind, ilist);
  gimple_bind_add_stmt (bind, stmt);
  gimple_bind_add_seq (bind, olist);

  pop_gimplify_context (NULL);

  if (dep_bind)
    {
      gimple_bind_add_seq (dep_bind, dep_ilist);
      gimple_bind_add_stmt (dep_bind, bind);
      gimple_bind_add_seq (dep_bind, dep_olist);
      pop_gimplify_context (dep_bind);
    }
}

/* Lower the OpenMP target directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */
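
/* Illustrative sketch (pseudo-code): for each mapped variable the code
   below fills one slot in each of three parallel arrays consumed by the
   runtime, e.g. for 'map(tofrom: a)' something like

	.omp_data_arr.a = &a;
	.omp_data_sizes[i] = sizeof (a);
	.omp_data_kinds[i] = OMP_CLAUSE_MAP_TOFROM | (align_log2 << 3);

   The three decls are stored in a TREE_VEC and attached to the statement
   via gimple_omp_target_set_data_arg.  */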

static void
lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t, c;
  gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
  gbind *tgt_bind = NULL, *bind;
  gimple_seq tgt_body = NULL, olist, ilist, new_body;
  location_t loc = gimple_location (stmt);
  int kind = gimple_omp_target_kind (stmt);
  unsigned int map_cnt = 0;

  clauses = gimple_omp_target_clauses (stmt);
  if (kind == GF_OMP_TARGET_KIND_REGION)
    {
      tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
      tgt_body = gimple_bind_body (tgt_bind);
    }
  else if (kind == GF_OMP_TARGET_KIND_DATA)
    tgt_body = gimple_omp_body (stmt);
  child_fn = ctx->cb.dst_fn;

  push_gimplify_context ();

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
        tree var, x;

      default:
        break;
      case OMP_CLAUSE_MAP:
      case OMP_CLAUSE_TO:
      case OMP_CLAUSE_FROM:
        var = OMP_CLAUSE_DECL (c);
        if (!DECL_P (var))
          {
            if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
                || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
              map_cnt++;
            continue;
          }

        if (DECL_SIZE (var)
            && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
          {
            tree var2 = DECL_VALUE_EXPR (var);
            gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
            var2 = TREE_OPERAND (var2, 0);
            gcc_assert (DECL_P (var2));
            var = var2;
          }

        if (!maybe_lookup_field (var, ctx))
          continue;

        if (kind == GF_OMP_TARGET_KIND_REGION)
          {
            x = build_receiver_ref (var, true, ctx);
            tree new_var = lookup_decl (var, ctx);
            if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
                && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
                && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
                && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
              x = build_simple_mem_ref (x);
            SET_DECL_VALUE_EXPR (new_var, x);
            DECL_HAS_VALUE_EXPR_P (new_var) = 1;
          }
        map_cnt++;
      }

  if (kind == GF_OMP_TARGET_KIND_REGION)
    {
      target_nesting_level++;
      lower_omp (&tgt_body, ctx);
      target_nesting_level--;
    }
  else if (kind == GF_OMP_TARGET_KIND_DATA)
    lower_omp (&tgt_body, ctx);

  if (kind == GF_OMP_TARGET_KIND_REGION)
    {
      /* Declare all the variables created by mapping and the variables
         declared in the scope of the target body.  */
      record_vars_into (ctx->block_vars, child_fn);
      record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
    }

  olist = NULL;
  ilist = NULL;
  if (ctx->record_type)
    {
      ctx->sender_decl
        = create_tmp_var (ctx->record_type, ".omp_data_arr");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      t = make_tree_vec (3);
      TREE_VEC_ELT (t, 0) = ctx->sender_decl;
      TREE_VEC_ELT (t, 1)
        = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
                          ".omp_data_sizes");
      DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
      TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
      TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
      TREE_VEC_ELT (t, 2)
        = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
                                                  map_cnt),
                          ".omp_data_kinds");
      DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
      TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
      TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
      gimple_omp_target_set_data_arg (stmt, t);

      vec<constructor_elt, va_gc> *vsize;
      vec<constructor_elt, va_gc> *vkind;
      vec_alloc (vsize, map_cnt);
      vec_alloc (vkind, map_cnt);
      unsigned int map_idx = 0;

      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
        switch (OMP_CLAUSE_CODE (c))
          {
            tree ovar, nc;

          default:
            break;
          case OMP_CLAUSE_MAP:
          case OMP_CLAUSE_TO:
          case OMP_CLAUSE_FROM:
            nc = c;
            ovar = OMP_CLAUSE_DECL (c);
            if (!DECL_P (ovar))
              {
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
                    && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
                  {
                    gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
                                         == get_base_address (ovar));
                    nc = OMP_CLAUSE_CHAIN (c);
                    ovar = OMP_CLAUSE_DECL (nc);
                  }
                else
                  {
                    tree x = build_sender_ref (ovar, ctx);
                    tree v
                      = build_fold_addr_expr_with_type (ovar, ptr_type_node);
                    gimplify_assign (x, v, &ilist);
                    nc = NULL_TREE;
                  }
              }
            else
              {
                if (DECL_SIZE (ovar)
                    && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
                  {
                    tree ovar2 = DECL_VALUE_EXPR (ovar);
                    gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
                    ovar2 = TREE_OPERAND (ovar2, 0);
                    gcc_assert (DECL_P (ovar2));
                    ovar = ovar2;
                  }
                if (!maybe_lookup_field (ovar, ctx))
                  continue;
              }

            unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
            if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
              talign = DECL_ALIGN_UNIT (ovar);
            if (nc)
              {
                tree var = lookup_decl_in_outer_ctx (ovar, ctx);
                tree x = build_sender_ref (ovar, ctx);
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
                    && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
                    && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
                    && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
                  {
                    gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
                    tree avar
                      = create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
                    mark_addressable (avar);
                    gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
                    talign = DECL_ALIGN_UNIT (avar);
                    avar = build_fold_addr_expr (avar);
                    gimplify_assign (x, avar, &ilist);
                  }
                else if (is_gimple_reg (var))
                  {
                    gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
                    tree avar = create_tmp_var (TREE_TYPE (var));
                    mark_addressable (avar);
                    if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
                        && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
                      gimplify_assign (avar, var, &ilist);
                    avar = build_fold_addr_expr (avar);
                    gimplify_assign (x, avar, &ilist);
                    if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
                         || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
                        && !TYPE_READONLY (TREE_TYPE (var)))
                      {
                        x = build_sender_ref (ovar, ctx);
                        x = build_simple_mem_ref (x);
                        gimplify_assign (var, x, &olist);
                      }
                  }
                else
                  {
                    var = build_fold_addr_expr (var);
                    gimplify_assign (x, var, &ilist);
                  }
              }
            tree s = OMP_CLAUSE_SIZE (c);
            if (s == NULL_TREE)
              s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
            s = fold_convert (size_type_node, s);
            tree purpose = size_int (map_idx++);
            CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
            if (TREE_CODE (s) != INTEGER_CST)
              TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;

            unsigned char tkind = 0;
            switch (OMP_CLAUSE_CODE (c))
              {
              case OMP_CLAUSE_MAP:
                tkind = OMP_CLAUSE_MAP_KIND (c);
                break;
              case OMP_CLAUSE_TO:
                tkind = OMP_CLAUSE_MAP_TO;
                break;
              case OMP_CLAUSE_FROM:
                tkind = OMP_CLAUSE_MAP_FROM;
                break;
              default:
                gcc_unreachable ();
              }
            talign = ceil_log2 (talign);
            tkind |= talign << 3;
            CONSTRUCTOR_APPEND_ELT (vkind, purpose,
                                    build_int_cst (unsigned_char_type_node,
                                                   tkind));
            if (nc && nc != c)
              c = nc;
          }

      gcc_assert (map_idx == map_cnt);

      DECL_INITIAL (TREE_VEC_ELT (t, 1))
        = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
      DECL_INITIAL (TREE_VEC_ELT (t, 2))
        = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
      if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
        {
          gimple_seq initlist = NULL;
          force_gimple_operand (build1 (DECL_EXPR, void_type_node,
                                        TREE_VEC_ELT (t, 1)),
                                &initlist, true, NULL_TREE);
          gimple_seq_add_seq (&ilist, initlist);

          tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
                                            NULL);
          TREE_THIS_VOLATILE (clobber) = 1;
          gimple_seq_add_stmt (&olist,
                               gimple_build_assign (TREE_VEC_ELT (t, 1),
                                                    clobber));
        }

      tree clobber = build_constructor (ctx->record_type, NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
                                                        clobber));
    }

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
                           gimple_build_assign (ctx->receiver_decl, t));
    }

  if (kind == GF_OMP_TARGET_KIND_REGION)
    {
      gimple_seq_add_seq (&new_body, tgt_body);
      new_body = maybe_catch_exception (new_body);
    }
  else if (kind == GF_OMP_TARGET_KIND_DATA)
    new_body = tgt_body;
  if (kind != GF_OMP_TARGET_KIND_UPDATE)
    {
      gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
      gimple_omp_set_body (stmt, new_body);
    }

  bind = gimple_build_bind (NULL, NULL,
                            tgt_bind ? gimple_bind_block (tgt_bind)
                                     : NULL_TREE);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_seq (bind, ilist);
  gimple_bind_add_stmt (bind, stmt);
  gimple_bind_add_seq (bind, olist);

  pop_gimplify_context (NULL);
}

/* Expand code for an OpenMP teams directive.  */
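
/* Illustrative sketch (pseudo-code):

	#pragma omp teams num_teams(N) thread_limit(M)
	  body;

   becomes roughly

	<input clauses>
	GIMPLE_OMP_TEAMS
	GOMP_teams (N, M);
	body;
	<reductions, destructor list>
	GIMPLE_OMP_RETURN (nowait);

   with N and M defaulting to 0 (the runtime picks the values) when the
   clauses are absent.  */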

static void
lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
  push_gimplify_context ();

  tree block = make_node (BLOCK);
  gbind *bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_seq bind_body = NULL;
  gimple_seq dlist = NULL;
  gimple_seq olist = NULL;

  tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
                                    OMP_CLAUSE_NUM_TEAMS);
  if (num_teams == NULL_TREE)
    num_teams = build_int_cst (unsigned_type_node, 0);
  else
    {
      num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
      num_teams = fold_convert (unsigned_type_node, num_teams);
      gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
    }
  tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
                                       OMP_CLAUSE_THREAD_LIMIT);
  if (thread_limit == NULL_TREE)
    thread_limit = build_int_cst (unsigned_type_node, 0);
  else
    {
      thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
      thread_limit = fold_convert (unsigned_type_node, thread_limit);
      gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
                     fb_rvalue);
    }

  lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
                           &bind_body, &dlist, ctx, NULL);
  lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
  lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
  gimple_seq_add_stmt (&bind_body, teams_stmt);

  location_t loc = gimple_location (teams_stmt);
  tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
  gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
  gimple_set_location (call, loc);
  gimple_seq_add_stmt (&bind_body, call);

  gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
  gimple_omp_set_body (teams_stmt, NULL);
  gimple_seq_add_seq (&bind_body, olist);
  gimple_seq_add_seq (&bind_body, dlist);
  gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}


/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
                        void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;
  gcall *call_stmt;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
        gcond *cond_stmt = as_a <gcond *> (stmt);
        if ((ctx || task_shared_vars)
            && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
                           lower_omp_regimplify_p,
                           ctx ? NULL : &wi, NULL)
                || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
                              lower_omp_regimplify_p,
                              ctx ? NULL : &wi, NULL)))
          gimple_regimplify_operands (cond_stmt, gsi_p);
      }
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval_ptr (stmt), ctx);
      lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
      break;
    case GIMPLE_TRANSACTION:
      lower_omp (gimple_transaction_body_ptr (
                   as_a <gtransaction *> (stmt)),
                 ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
        ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
        ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
        ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_TASKGROUP:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_taskgroup (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
          && walk_tree (gimple_omp_atomic_load_rhs_ptr (
                          as_a <gomp_atomic_load *> (stmt)),
                        lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_OMP_TARGET:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_target (gsi_p, ctx);
      break;
    case GIMPLE_OMP_TEAMS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_teams (gsi_p, ctx);
      break;
    case GIMPLE_CALL:
      tree fndecl;
      call_stmt = as_a <gcall *> (stmt);
      fndecl = gimple_call_fndecl (call_stmt);
      if (fndecl
          && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
        switch (DECL_FUNCTION_CODE (fndecl))
          {
          case BUILT_IN_GOMP_BARRIER:
            if (ctx == NULL)
              break;
            /* FALLTHRU */
          case BUILT_IN_GOMP_CANCEL:
          case BUILT_IN_GOMP_CANCELLATION_POINT:
            omp_context *cctx;
            cctx = ctx;
            if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
              cctx = cctx->outer;
            gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
            if (!cctx->cancellable)
              {
                if (DECL_FUNCTION_CODE (fndecl)
                    == BUILT_IN_GOMP_CANCELLATION_POINT)
                  {
                    stmt = gimple_build_nop ();
                    gsi_replace (gsi_p, stmt, false);
                  }
                break;
              }
            if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
              {
                fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
                gimple_call_set_fndecl (call_stmt, fndecl);
                gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
              }
            tree lhs;
            lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
            gimple_call_set_lhs (call_stmt, lhs);
            tree fallthru_label;
            fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
            gimple g;
            g = gimple_build_label (fallthru_label);
            gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
            g = gimple_build_cond (NE_EXPR, lhs,
                                   fold_convert (TREE_TYPE (lhs),
                                                 boolean_false_node),
                                   cctx->cancel_label, fallthru_label);
            gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
            break;
          default:
            break;
          }
      /* FALLTHRU */
    default:
      if ((ctx || task_shared_vars)
          && walk_gimple_op (stmt, lower_omp_regimplify_p,
                             ctx ? NULL : &wi))
        {
          /* Just remove clobbers, this should happen only if we have
             "privatized" local addressable variables in SIMD regions,
             the clobber isn't needed in that case and gimplifying address
             of the ARRAY_REF into a pointer and creating MEM_REF based
             clobber would create worse code than we get with the clobber
             dropped.  */
          if (gimple_clobber_p (stmt))
            {
              gsi_replace (gsi_p, gimple_build_nop (), true);
              break;
            }
          gimple_regimplify_operands (stmt, gsi_p);
        }
      break;
    }
}

static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  /* During gimplification, we have not always invoked fold_stmt
     (gimplify.c:maybe_fold_stmt); call it now.  */
  if (target_nesting_level)
    for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
      fold_stmt (&gsi);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;
  int i;
  omp_context *ctx;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (&body, NULL);
  gcc_assert (taskreg_nesting_level == 0);
  FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
    finish_taskreg_scan (ctx);
  taskreg_contexts.release ();

  if (all_contexts->root)
    {
      if (task_shared_vars)
        push_gimplify_context ();
      lower_omp (&body, NULL);
      if (task_shared_vars)
        pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

namespace {

const pass_data pass_data_lower_omp =
{
  GIMPLE_PASS, /* type */
  "omplower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  PROP_gimple_lomp, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_omp : public gimple_opt_pass
{
public:
  pass_lower_omp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_omp, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return execute_lower_omp (); }

}; // class pass_lower_omp

} // anon namespace

gimple_opt_pass *
make_pass_lower_omp (gcc::context *ctxt)
{
  return new pass_lower_omp (ctxt);
}

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */
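
/* For example (illustrative), the front ends use this to reject

	#pragma omp parallel
	  {
	    goto bad;		<- invalid branch out of the block
	  }
	bad: ;

   by recording the OMP context of every label (pass 1 below) and then
   comparing it with the context of each branch (pass 2).  */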
10822 static splay_tree all_labels;
10824 /* Check for mismatched contexts and generate an error if needed. Return
10825 true if an error is detected. */
10827 static bool
10828 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10829 gimple branch_ctx, gimple label_ctx)
10831 if (label_ctx == branch_ctx)
10832 return false;
10836 Previously we kept track of the label's entire context in diagnose_sb_[12]
10837 so we could traverse it and issue a correct "exit" or "enter" error
10838 message upon a structured block violation.
10840 We built the context by building a list with tree_cons'ing, but there is
10841 no easy counterpart in gimple tuples. It seems like far too much work
10842 for issuing exit/enter error messages. If someone really misses the
10843 distinct error message... patches welcome.
10846 #if 0
10847 /* Try to avoid confusing the user by producing and error message
10848 with correct "exit" or "enter" verbiage. We prefer "exit"
10849 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10850 if (branch_ctx == NULL)
10851 exit_p = false;
10852 else
10854 while (label_ctx)
10856 if (TREE_VALUE (label_ctx) == branch_ctx)
10858 exit_p = false;
10859 break;
10861 label_ctx = TREE_CHAIN (label_ctx);
10865 if (exit_p)
10866 error ("invalid exit from OpenMP structured block");
10867 else
10868 error ("invalid entry to OpenMP structured block");
10869 #endif
10871 bool cilkplus_block = false;
10872 if (flag_cilkplus)
10874 if ((branch_ctx
10875 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10876 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10877 || (label_ctx
10878 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10879 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10880 cilkplus_block = true;
10883 /* If it's obvious we have an invalid entry, be specific about the error. */
10884 if (branch_ctx == NULL)
10886 if (cilkplus_block)
10887 error ("invalid entry to Cilk Plus structured block");
10888 else
10889 error ("invalid entry to OpenMP structured block");
10891 else
10893 /* Otherwise, be vague and lazy, but efficient. */
10894 if (cilkplus_block)
10895 error ("invalid branch to/from a Cilk Plus structured block");
10896 else
10897 error ("invalid branch to/from an OpenMP structured block");
10900 gsi_replace (gsi_p, gimple_build_nop (), false);
10901 return true;
10904 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10905 where each label is found. */
10907 static tree
10908 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10909 struct walk_stmt_info *wi)
10911 gimple context = (gimple) wi->info;
10912 gimple inner_context;
10913 gimple stmt = gsi_stmt (*gsi_p);
10915 *handled_ops_p = true;
10917 switch (gimple_code (stmt))
10919 WALK_SUBSTMTS;
10921 case GIMPLE_OMP_PARALLEL:
10922 case GIMPLE_OMP_TASK:
10923 case GIMPLE_OMP_SECTIONS:
10924 case GIMPLE_OMP_SINGLE:
10925 case GIMPLE_OMP_SECTION:
10926 case GIMPLE_OMP_MASTER:
10927 case GIMPLE_OMP_ORDERED:
10928 case GIMPLE_OMP_CRITICAL:
10929 case GIMPLE_OMP_TARGET:
10930 case GIMPLE_OMP_TEAMS:
10931 case GIMPLE_OMP_TASKGROUP:
10932 /* The minimal context here is just the current OMP construct. */
10933 inner_context = stmt;
10934 wi->info = inner_context;
10935 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10936 wi->info = context;
10937 break;
10939 case GIMPLE_OMP_FOR:
10940 inner_context = stmt;
10941 wi->info = inner_context;
10942 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10943 walk them. */
10944 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10945 diagnose_sb_1, NULL, wi);
10946 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10947 wi->info = context;
10948 break;
10950 case GIMPLE_LABEL:
10951 splay_tree_insert (all_labels,
10952 (splay_tree_key) gimple_label_label (
10953 as_a <glabel *> (stmt)),
10954 (splay_tree_value) context);
10955 break;
10957 default:
10958 break;
10961 return NULL_TREE;
10964 /* Pass 2: Check each branch and see if its context differs from that of
10965 the destination label's context. */
10967 static tree
10968 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10969 struct walk_stmt_info *wi)
10971 gimple context = (gimple) wi->info;
10972 splay_tree_node n;
10973 gimple stmt = gsi_stmt (*gsi_p);
10975 *handled_ops_p = true;
10977 switch (gimple_code (stmt))
10979 WALK_SUBSTMTS;
10981 case GIMPLE_OMP_PARALLEL:
10982 case GIMPLE_OMP_TASK:
10983 case GIMPLE_OMP_SECTIONS:
10984 case GIMPLE_OMP_SINGLE:
10985 case GIMPLE_OMP_SECTION:
10986 case GIMPLE_OMP_MASTER:
10987 case GIMPLE_OMP_ORDERED:
10988 case GIMPLE_OMP_CRITICAL:
10989 case GIMPLE_OMP_TARGET:
10990 case GIMPLE_OMP_TEAMS:
10991 case GIMPLE_OMP_TASKGROUP:
10992 wi->info = stmt;
10993 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10994 wi->info = context;
10995 break;
10997 case GIMPLE_OMP_FOR:
10998 wi->info = stmt;
10999 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
11000 walk them. */
11001 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
11002 diagnose_sb_2, NULL, wi);
11003 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
11004 wi->info = context;
11005 break;
11007 case GIMPLE_COND:
11009 gcond *cond_stmt = as_a <gcond *> (stmt);
11010 tree lab = gimple_cond_true_label (cond_stmt);
11011 if (lab)
11013 n = splay_tree_lookup (all_labels,
11014 (splay_tree_key) lab);
11015 diagnose_sb_0 (gsi_p, context,
11016 n ? (gimple) n->value : NULL);
11018 lab = gimple_cond_false_label (cond_stmt);
11019 if (lab)
11021 n = splay_tree_lookup (all_labels,
11022 (splay_tree_key) lab);
11023 diagnose_sb_0 (gsi_p, context,
11024 n ? (gimple) n->value : NULL);
11027 break;
11029 case GIMPLE_GOTO:
11031 tree lab = gimple_goto_dest (stmt);
11032 if (TREE_CODE (lab) != LABEL_DECL)
11033 break;
11035 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
11036 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
11038 break;
11040 case GIMPLE_SWITCH:
11042 gswitch *switch_stmt = as_a <gswitch *> (stmt);
11043 unsigned int i;
11044 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
11046 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
11047 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
11048 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
11049 break;
11052 break;
11054 case GIMPLE_RETURN:
11055 diagnose_sb_0 (gsi_p, context, NULL);
11056 break;
11058 default:
11059 break;
11062 return NULL_TREE;
11065 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
11066 codes. */
11067 bool
11068 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
11069 int *region_idx)
11071 gimple last = last_stmt (bb);
11072 enum gimple_code code = gimple_code (last);
11073 struct omp_region *cur_region = *region;
11074 bool fallthru = false;
11076 switch (code)
11078 case GIMPLE_OMP_PARALLEL:
11079 case GIMPLE_OMP_TASK:
11080 case GIMPLE_OMP_FOR:
11081 case GIMPLE_OMP_SINGLE:
11082 case GIMPLE_OMP_TEAMS:
11083 case GIMPLE_OMP_MASTER:
11084 case GIMPLE_OMP_TASKGROUP:
11085 case GIMPLE_OMP_ORDERED:
11086 case GIMPLE_OMP_CRITICAL:
11087 case GIMPLE_OMP_SECTION:
11088 cur_region = new_omp_region (bb, code, cur_region);
11089 fallthru = true;
11090 break;
11092 case GIMPLE_OMP_TARGET:
11093 cur_region = new_omp_region (bb, code, cur_region);
11094 fallthru = true;
11095 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
11096 cur_region = cur_region->outer;
11097 break;
11099 case GIMPLE_OMP_SECTIONS:
11100 cur_region = new_omp_region (bb, code, cur_region);
11101 fallthru = true;
11102 break;
11104 case GIMPLE_OMP_SECTIONS_SWITCH:
11105 fallthru = false;
11106 break;
11108 case GIMPLE_OMP_ATOMIC_LOAD:
11109 case GIMPLE_OMP_ATOMIC_STORE:
11110 fallthru = true;
11111 break;
11113 case GIMPLE_OMP_RETURN:
11114 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
11115 somewhere other than the next block. This will be
11116 created later. */
11117 cur_region->exit = bb;
11118 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
11119 cur_region = cur_region->outer;
11120 break;
11122 case GIMPLE_OMP_CONTINUE:
11123 cur_region->cont = bb;
11124 switch (cur_region->type)
11126 case GIMPLE_OMP_FOR:
11127 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
11128 succs edges as abnormal to prevent splitting
11129 them. */
11130 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
11131 /* Make the loopback edge. */
11132 make_edge (bb, single_succ (cur_region->entry),
11133 EDGE_ABNORMAL);
11135 /* Create an edge from GIMPLE_OMP_FOR to exit, which
11136 corresponds to the case that the body of the loop
11137 is not executed at all. */
11138 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
11139 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
11140 fallthru = false;
11141 break;
11143 case GIMPLE_OMP_SECTIONS:
11144 /* Wire up the edges into and out of the nested sections. */
11146 basic_block switch_bb = single_succ (cur_region->entry);
11148 struct omp_region *i;
11149 for (i = cur_region->inner; i ; i = i->next)
11151 gcc_assert (i->type == GIMPLE_OMP_SECTION);
11152 make_edge (switch_bb, i->entry, 0);
11153 make_edge (i->exit, bb, EDGE_FALLTHRU);
11156 /* Make the loopback edge to the block with
11157 GIMPLE_OMP_SECTIONS_SWITCH. */
11158 make_edge (bb, switch_bb, 0);
11160 /* Make the edge from the switch to exit. */
11161 make_edge (switch_bb, bb->next_bb, 0);
11162 fallthru = false;
11164 break;
11166 default:
11167 gcc_unreachable ();
11169 break;
11171 default:
11172 gcc_unreachable ();
11175 if (*region != cur_region)
11177 *region = cur_region;
11178 if (cur_region)
11179 *region_idx = cur_region->entry->index;
11180 else
11181 *region_idx = 0;
11184 return fallthru;
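/* As an illustration (a sketch only, not verbatim dump output), for a
   GIMPLE_OMP_FOR region the GIMPLE_OMP_CONTINUE case above ends up with:

     - an abnormal loopback edge from the GIMPLE_OMP_CONTINUE block back
       to the successor of the GIMPLE_OMP_FOR entry block,
     - an abnormal edge from the entry block past the loop, for the case
       that the body executes zero times, and
     - a fallthru+abnormal edge from the GIMPLE_OMP_CONTINUE block to the
       following block.

   The EDGE_ABNORMAL flags keep later CFG manipulation from splitting
   these edges before pass_expand_omp rewrites the region.  */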
11187 static unsigned int
11188 diagnose_omp_structured_block_errors (void)
11190 struct walk_stmt_info wi;
11191 gimple_seq body = gimple_body (current_function_decl);
11193 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
11195 memset (&wi, 0, sizeof (wi));
11196 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
11198 memset (&wi, 0, sizeof (wi));
11199 wi.want_locations = true;
11200 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
11202 gimple_set_body (current_function_decl, body);
11204 splay_tree_delete (all_labels);
11205 all_labels = NULL;
11207 return 0;
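/* For illustration (a hedged sketch; the exact diagnostic wording lives
   in diagnose_sb_0, defined earlier in this file), the two walks above
   catch jumps that cross an OpenMP structured block boundary:

     void f (int x)
     {
       if (x)
         goto inside;        <-- rejected: jumps into the block
     #pragma omp parallel
       {
     inside:;
         if (x)
           goto outside;     <-- rejected: jumps out of the block
       }
     outside:;
     }

   The first walk records every label's binding context in ALL_LABELS;
   the second compares each branch's context with that of its target
   label.  */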
11210 namespace {
11212 const pass_data pass_data_diagnose_omp_blocks =
11214 GIMPLE_PASS, /* type */
11215 "*diagnose_omp_blocks", /* name */
11216 OPTGROUP_NONE, /* optinfo_flags */
11217 TV_NONE, /* tv_id */
11218 PROP_gimple_any, /* properties_required */
11219 0, /* properties_provided */
11220 0, /* properties_destroyed */
11221 0, /* todo_flags_start */
11222 0, /* todo_flags_finish */
11225 class pass_diagnose_omp_blocks : public gimple_opt_pass
11227 public:
11228 pass_diagnose_omp_blocks (gcc::context *ctxt)
11229 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
11232 /* opt_pass methods: */
11233 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
11234 virtual unsigned int execute (function *)
11236 return diagnose_omp_structured_block_errors ();
11239 }; // class pass_diagnose_omp_blocks
11241 } // anon namespace
11243 gimple_opt_pass *
11244 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
11246 return new pass_diagnose_omp_blocks (ctxt);
11249 /* SIMD clone supporting code. */
11251 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
11252 of arguments to reserve space for. */
11254 static struct cgraph_simd_clone *
11255 simd_clone_struct_alloc (int nargs)
11257 struct cgraph_simd_clone *clone_info;
11258 size_t len = (sizeof (struct cgraph_simd_clone)
11259 + nargs * sizeof (struct cgraph_simd_clone_arg));
11260 clone_info = (struct cgraph_simd_clone *)
11261 ggc_internal_cleared_alloc (len);
11262 return clone_info;
11265 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
11267 static inline void
11268 simd_clone_struct_copy (struct cgraph_simd_clone *to,
11269 struct cgraph_simd_clone *from)
11271 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
11272 + ((from->nargs - from->inbranch)
11273 * sizeof (struct cgraph_simd_clone_arg))));
11276 /* Return a vector of the parameter types of function FNDECL. This
11277 uses TYPE_ARG_TYPES if available, otherwise falls back to the types
11278 of the DECL_ARGUMENTS chain. */
11280 vec<tree>
11281 simd_clone_vector_of_formal_parm_types (tree fndecl)
11283 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
11284 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
11285 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
11286 unsigned int i;
11287 tree arg;
11288 FOR_EACH_VEC_ELT (args, i, arg)
11289 args[i] = TREE_TYPE (args[i]);
11290 return args;
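/* For example (illustrative), for "int foo (int a, float *b);" this
   returns {int, float *}; when the types come from TYPE_ARG_TYPES of a
   prototype, a trailing void_type_node may be included, which callers
   such as simd_clone_clauses_extract (below) strip off.  */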
11293 /* Given a simd function in NODE, extract the simd specific
11294 information from the OMP clauses passed in CLAUSES, and return
11295 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
11296 is set to TRUE if the `inbranch' or `notinbranch' clause was
11297 specified, otherwise it is set to FALSE. */
11299 static struct cgraph_simd_clone *
11300 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
11301 bool *inbranch_specified)
11303 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
11304 tree t;
11305 int n;
11306 *inbranch_specified = false;
11308 n = args.length ();
11309 if (n > 0 && args.last () == void_type_node)
11310 n--;
11312 /* To distinguish them from OpenMP simd clones, Cilk Plus functions
11313 to be cloned carry a distinctive artificial "cilk simd function"
11314 attribute in addition to "omp declare simd". */
11315 bool cilk_clone
11316 = (flag_cilkplus
11317 && lookup_attribute ("cilk simd function",
11318 DECL_ATTRIBUTES (node->decl)));
11320 /* Allocate one more than needed just in case this is an in-branch
11321 clone which will require a mask argument. */
11322 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
11323 clone_info->nargs = n;
11324 clone_info->cilk_elemental = cilk_clone;
11326 if (!clauses)
11328 args.release ();
11329 return clone_info;
11331 clauses = TREE_VALUE (clauses);
11332 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
11333 return clone_info;
11335 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
11337 switch (OMP_CLAUSE_CODE (t))
11339 case OMP_CLAUSE_INBRANCH:
11340 clone_info->inbranch = 1;
11341 *inbranch_specified = true;
11342 break;
11343 case OMP_CLAUSE_NOTINBRANCH:
11344 clone_info->inbranch = 0;
11345 *inbranch_specified = true;
11346 break;
11347 case OMP_CLAUSE_SIMDLEN:
11348 clone_info->simdlen
11349 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
11350 break;
11351 case OMP_CLAUSE_LINEAR:
11353 tree decl = OMP_CLAUSE_DECL (t);
11354 tree step = OMP_CLAUSE_LINEAR_STEP (t);
11355 int argno = TREE_INT_CST_LOW (decl);
11356 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
11358 clone_info->args[argno].arg_type
11359 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
11360 clone_info->args[argno].linear_step = tree_to_shwi (step);
11361 gcc_assert (clone_info->args[argno].linear_step >= 0
11362 && clone_info->args[argno].linear_step < n);
11364 else
11366 if (POINTER_TYPE_P (args[argno]))
11367 step = fold_convert (ssizetype, step);
11368 if (!tree_fits_shwi_p (step))
11370 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11371 "ignoring large linear step");
11372 args.release ();
11373 return NULL;
11375 else if (integer_zerop (step))
11377 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11378 "ignoring zero linear step");
11379 args.release ();
11380 return NULL;
11382 else
11384 clone_info->args[argno].arg_type
11385 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
11386 clone_info->args[argno].linear_step = tree_to_shwi (step);
11389 break;
11391 case OMP_CLAUSE_UNIFORM:
11393 tree decl = OMP_CLAUSE_DECL (t);
11394 int argno = tree_to_uhwi (decl);
11395 clone_info->args[argno].arg_type
11396 = SIMD_CLONE_ARG_TYPE_UNIFORM;
11397 break;
11399 case OMP_CLAUSE_ALIGNED:
11401 tree decl = OMP_CLAUSE_DECL (t);
11402 int argno = tree_to_uhwi (decl);
11403 clone_info->args[argno].alignment
11404 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
11405 break;
11407 default:
11408 break;
11411 args.release ();
11412 return clone_info;
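/* A worked example (illustrative only).  Given

     #pragma omp declare simd uniform(a) linear(b:4) aligned(c:16) notinbranch
     int foo (int a, int b, int *c);

   the loop above marks args[0] SIMD_CLONE_ARG_TYPE_UNIFORM, args[1]
   SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with linear_step 4, records
   alignment 16 for args[2] (which keeps the default
   SIMD_CLONE_ARG_TYPE_VECTOR), clears inbranch and sets
   *INBRANCH_SPECIFIED.  Note that by this point the clauses refer to
   parameters by position: OMP_CLAUSE_DECL is an INTEGER_CST index, not
   a decl.  */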
11415 /* Given a SIMD clone in NODE, calculate the characteristic data
11416 type and return the corresponding type. The characteristic data
11417 type is computed as described in the Intel Vector ABI. */
11419 static tree
11420 simd_clone_compute_base_data_type (struct cgraph_node *node,
11421 struct cgraph_simd_clone *clone_info)
11423 tree type = integer_type_node;
11424 tree fndecl = node->decl;
11426 /* a) For non-void function, the characteristic data type is the
11427 return type. */
11428 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
11429 type = TREE_TYPE (TREE_TYPE (fndecl));
11431 /* b) If the function has any non-uniform, non-linear parameters,
11432 then the characteristic data type is the type of the first
11433 such parameter. */
11434 else
11436 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
11437 for (unsigned int i = 0; i < clone_info->nargs; ++i)
11438 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
11440 type = map[i];
11441 break;
11443 map.release ();
11446 /* c) If the characteristic data type determined by a) or b) above
11447 is a struct, union, or class type which is passed by value (except
11448 for the type that maps to the built-in complex data type), the
11449 characteristic data type is int. */
11450 if (RECORD_OR_UNION_TYPE_P (type)
11451 && !aggregate_value_p (type, NULL)
11452 && TREE_CODE (type) != COMPLEX_TYPE)
11453 return integer_type_node;
11455 /* d) If none of the above three classes is applicable, the
11456 characteristic data type is int. */
11458 return type;
11460 /* e) For Intel Xeon Phi native and offload compilation, if the
11461 resulting characteristic data type is 8-bit or 16-bit integer
11462 data type, the characteristic data type is int. */
11463 /* Well, we don't handle Xeon Phi yet. */
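/* Build the vector-ABI mangled name for the clone described by
   CLONE_INFO: "_ZGV", the target's ISA letter, 'M' or 'N' for a masked
   or unmasked clone, the simdlen, one letter per argument ('u' uniform,
   'l' plus step for linear, 's' for variable-step linear, 'v' vector,
   with 'a' plus alignment appended for aligned arguments), '_', and the
   original assembler name.  For example, assuming a target whose
   vecsize_mangle is 'b':

     #pragma omp declare simd simdlen(4) uniform(a) notinbranch
     float foo (float *a, float x);

   mangles to "_ZGVbN4uv_foo".  Returns NULL_TREE if a clone with this
   mangled name already exists.  */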
11466 static tree
11467 simd_clone_mangle (struct cgraph_node *node,
11468 struct cgraph_simd_clone *clone_info)
11470 char vecsize_mangle = clone_info->vecsize_mangle;
11471 char mask = clone_info->inbranch ? 'M' : 'N';
11472 unsigned int simdlen = clone_info->simdlen;
11473 unsigned int n;
11474 pretty_printer pp;
11476 gcc_assert (vecsize_mangle && simdlen);
11478 pp_string (&pp, "_ZGV");
11479 pp_character (&pp, vecsize_mangle);
11480 pp_character (&pp, mask);
11481 pp_decimal_int (&pp, simdlen);
11483 for (n = 0; n < clone_info->nargs; ++n)
11485 struct cgraph_simd_clone_arg arg = clone_info->args[n];
11487 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
11488 pp_character (&pp, 'u');
11489 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
11491 gcc_assert (arg.linear_step != 0);
11492 pp_character (&pp, 'l');
11493 if (arg.linear_step > 1)
11494 pp_unsigned_wide_integer (&pp, arg.linear_step);
11495 else if (arg.linear_step < 0)
11497 pp_character (&pp, 'n');
11498 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
11499 arg.linear_step));
11502 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
11504 pp_character (&pp, 's');
11505 pp_unsigned_wide_integer (&pp, arg.linear_step);
11507 else
11508 pp_character (&pp, 'v');
11509 if (arg.alignment)
11511 pp_character (&pp, 'a');
11512 pp_decimal_int (&pp, arg.alignment);
11516 pp_underscore (&pp);
11517 pp_string (&pp,
11518 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
11519 const char *str = pp_formatted_text (&pp);
11521 /* If there already is a SIMD clone with the same mangled name, don't
11522 add another one. This can happen e.g. for
11523 #pragma omp declare simd
11524 #pragma omp declare simd simdlen(8)
11525 int foo (int, int);
11526 if the simdlen is assumed to be 8 for the first one, etc. */
11527 for (struct cgraph_node *clone = node->simd_clones; clone;
11528 clone = clone->simdclone->next_clone)
11529 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
11530 str) == 0)
11531 return NULL_TREE;
11533 return get_identifier (str);
11536 /* Create a simd clone of OLD_NODE and return it. */
11538 static struct cgraph_node *
11539 simd_clone_create (struct cgraph_node *old_node)
11541 struct cgraph_node *new_node;
11542 if (old_node->definition)
11544 if (!old_node->has_gimple_body_p ())
11545 return NULL;
11546 old_node->get_body ();
11547 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
11548 false, NULL, NULL,
11549 "simdclone");
11551 else
11553 tree old_decl = old_node->decl;
11554 tree new_decl = copy_node (old_node->decl);
11555 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
11556 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
11557 SET_DECL_RTL (new_decl, NULL);
11558 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
11559 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
11560 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
11561 symtab->call_cgraph_insertion_hooks (new_node);
11563 if (new_node == NULL)
11564 return new_node;
11566 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
11568 /* The function cgraph_function_versioning () will force the new
11569 symbol local. Undo this, and inherit external visibility from
11570 the old node. */
11571 new_node->local.local = old_node->local.local;
11572 new_node->externally_visible = old_node->externally_visible;
11574 return new_node;
11577 /* Adjust the return type of the given function to its appropriate
11578 vector counterpart. Returns a simd array to be used throughout the
11579 function as a return value. */
11581 static tree
11582 simd_clone_adjust_return_type (struct cgraph_node *node)
11584 tree fndecl = node->decl;
11585 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
11586 unsigned int veclen;
11587 tree t;
11589 /* Adjust the function return type. */
11590 if (orig_rettype == void_type_node)
11591 return NULL_TREE;
11592 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
11593 t = TREE_TYPE (TREE_TYPE (fndecl));
11594 if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
11595 veclen = node->simdclone->vecsize_int;
11596 else
11597 veclen = node->simdclone->vecsize_float;
11598 veclen /= GET_MODE_BITSIZE (TYPE_MODE (t));
11599 if (veclen > node->simdclone->simdlen)
11600 veclen = node->simdclone->simdlen;
11601 if (POINTER_TYPE_P (t))
11602 t = pointer_sized_int_node;
11603 if (veclen == node->simdclone->simdlen)
11604 t = build_vector_type (t, node->simdclone->simdlen);
11605 else
11607 t = build_vector_type (t, veclen);
11608 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
11610 TREE_TYPE (TREE_TYPE (fndecl)) = t;
11611 if (!node->definition)
11612 return NULL_TREE;
11614 t = DECL_RESULT (fndecl);
11615 /* Adjust the DECL_RESULT. */
11616 gcc_assert (TREE_TYPE (t) != void_type_node);
11617 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
11618 relayout_decl (t);
11620 tree atype = build_array_type_nelts (orig_rettype,
11621 node->simdclone->simdlen);
11622 if (veclen != node->simdclone->simdlen)
11623 return build1 (VIEW_CONVERT_EXPR, atype, t);
11625 /* Set up a SIMD array to use as the return value. */
11626 tree retval = create_tmp_var_raw (atype, "retval");
11627 gimple_add_tmp_var (retval);
11628 return retval;
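/* A worked example of the above (illustrative; the numbers depend on
   the target hook).  With simdlen 8 and vecsize_float 256 bits, a
   "float" return has veclen = 256 / 32 = 8 == simdlen, so the return
   type becomes vector(8) float and a local "float retval[8]" simd array
   is created.  With vecsize_float 128 bits instead, veclen = 4 < 8, the
   return type becomes a 2-element array of vector(4) float, and the
   DECL_RESULT itself is reused through a VIEW_CONVERT_EXPR to
   float[8].  */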
11631 /* Each vector argument has a corresponding array to be used locally
11632 as part of the eventual loop. Create such a temporary array and
11633 return it.
11635 PREFIX is the prefix to be used for the temporary.
11637 TYPE is the inner element type.
11639 SIMDLEN is the number of elements. */
11641 static tree
11642 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
11644 tree atype = build_array_type_nelts (type, simdlen);
11645 tree avar = create_tmp_var_raw (atype, prefix);
11646 gimple_add_tmp_var (avar);
11647 return avar;
11650 /* Modify the function argument types to their corresponding vector
11651 counterparts if appropriate. Also, create one array for each simd
11652 argument to be used locally when using the function arguments as
11653 part of the loop.
11655 NODE is the function whose arguments are to be adjusted.
11657 Returns an adjustment vector describing how the argument types
11658 have been adjusted. */
11660 static ipa_parm_adjustment_vec
11661 simd_clone_adjust_argument_types (struct cgraph_node *node)
11663 vec<tree> args;
11664 ipa_parm_adjustment_vec adjustments;
11666 if (node->definition)
11667 args = ipa_get_vector_of_formal_parms (node->decl);
11668 else
11669 args = simd_clone_vector_of_formal_parm_types (node->decl);
11670 adjustments.create (args.length ());
11671 unsigned i, j, veclen;
11672 struct ipa_parm_adjustment adj;
11673 for (i = 0; i < node->simdclone->nargs; ++i)
11675 memset (&adj, 0, sizeof (adj));
11676 tree parm = args[i];
11677 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
11678 adj.base_index = i;
11679 adj.base = parm;
11681 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
11682 node->simdclone->args[i].orig_type = parm_type;
11684 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
11686 /* No adjustment necessary for scalar arguments. */
11687 adj.op = IPA_PARM_OP_COPY;
11689 else
11691 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
11692 veclen = node->simdclone->vecsize_int;
11693 else
11694 veclen = node->simdclone->vecsize_float;
11695 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
11696 if (veclen > node->simdclone->simdlen)
11697 veclen = node->simdclone->simdlen;
11698 adj.arg_prefix = "simd";
11699 if (POINTER_TYPE_P (parm_type))
11700 adj.type = build_vector_type (pointer_sized_int_node, veclen);
11701 else
11702 adj.type = build_vector_type (parm_type, veclen);
11703 node->simdclone->args[i].vector_type = adj.type;
11704 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11706 adjustments.safe_push (adj);
11707 if (j == veclen)
11709 memset (&adj, 0, sizeof (adj));
11710 adj.op = IPA_PARM_OP_NEW;
11711 adj.arg_prefix = "simd";
11712 adj.base_index = i;
11713 adj.type = node->simdclone->args[i].vector_type;
11717 if (node->definition)
11718 node->simdclone->args[i].simd_array
11719 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
11720 parm_type, node->simdclone->simdlen);
11722 adjustments.safe_push (adj);
11725 if (node->simdclone->inbranch)
11727 tree base_type
11728 = simd_clone_compute_base_data_type (node->simdclone->origin,
11729 node->simdclone);
11731 memset (&adj, 0, sizeof (adj));
11732 adj.op = IPA_PARM_OP_NEW;
11733 adj.arg_prefix = "mask";
11735 adj.base_index = i;
11736 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
11737 veclen = node->simdclone->vecsize_int;
11738 else
11739 veclen = node->simdclone->vecsize_float;
11740 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
11741 if (veclen > node->simdclone->simdlen)
11742 veclen = node->simdclone->simdlen;
11743 if (POINTER_TYPE_P (base_type))
11744 adj.type = build_vector_type (pointer_sized_int_node, veclen);
11745 else
11746 adj.type = build_vector_type (base_type, veclen);
11747 adjustments.safe_push (adj);
11749 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11750 adjustments.safe_push (adj);
11752 /* We have previously allocated one extra entry for the mask. Use
11753 it and fill it. */
11754 struct cgraph_simd_clone *sc = node->simdclone;
11755 sc->nargs++;
11756 if (node->definition)
11758 sc->args[i].orig_arg
11759 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
11760 sc->args[i].simd_array
11761 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
11763 sc->args[i].orig_type = base_type;
11764 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
11767 if (node->definition)
11768 ipa_modify_formal_parameters (node->decl, adjustments);
11769 else
11771 tree new_arg_types = NULL_TREE, new_reversed;
11772 bool last_parm_void = false;
11773 if (args.length () > 0 && args.last () == void_type_node)
11774 last_parm_void = true;
11776 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
11777 j = adjustments.length ();
11778 for (i = 0; i < j; i++)
11780 struct ipa_parm_adjustment *adj = &adjustments[i];
11781 tree ptype;
11782 if (adj->op == IPA_PARM_OP_COPY)
11783 ptype = args[adj->base_index];
11784 else
11785 ptype = adj->type;
11786 new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
11788 new_reversed = nreverse (new_arg_types);
11789 if (last_parm_void)
11791 if (new_reversed)
11792 TREE_CHAIN (new_arg_types) = void_list_node;
11793 else
11794 new_reversed = void_list_node;
11797 tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
11798 TYPE_ARG_TYPES (new_type) = new_reversed;
11799 TREE_TYPE (node->decl) = new_type;
11801 adjustments.release ();
11803 args.release ();
11804 return adjustments;
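/* Continuing the example (vecsize_int 128 bits, simdlen 8): a vector
   "int b" argument has veclen = 128 / 32 = 4, so it is rewritten into
   two vector(4) int parameters; one IPA_PARM_OP_NEW adjustment is
   pushed for each vector beyond the first, and for definitions a local
   simd array of 8 ints is created via create_tmp_simd_array to hold the
   unpacked values.  Uniform and linear arguments pass through unchanged
   (IPA_PARM_OP_COPY).  An inbranch clone additionally gets a trailing
   mask argument built the same way from the characteristic type.  */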
11807 /* Initialize and copy the function arguments in NODE to their
11808 corresponding local simd arrays. Returns a fresh gimple_seq with
11809 the instruction sequence generated. */
11811 static gimple_seq
11812 simd_clone_init_simd_arrays (struct cgraph_node *node,
11813 ipa_parm_adjustment_vec adjustments)
11815 gimple_seq seq = NULL;
11816 unsigned i = 0, j = 0, k;
11818 for (tree arg = DECL_ARGUMENTS (node->decl);
11819 arg;
11820 arg = DECL_CHAIN (arg), i++, j++)
11822 if (adjustments[j].op == IPA_PARM_OP_COPY)
11823 continue;
11825 node->simdclone->args[i].vector_arg = arg;
11827 tree array = node->simdclone->args[i].simd_array;
11828 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
11830 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11831 tree ptr = build_fold_addr_expr (array);
11832 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11833 build_int_cst (ptype, 0));
11834 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11835 gimplify_and_add (t, &seq);
11837 else
11839 unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
11840 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11841 for (k = 0; k < node->simdclone->simdlen; k += simdlen)
11843 tree ptr = build_fold_addr_expr (array);
11844 int elemsize;
11845 if (k)
11847 arg = DECL_CHAIN (arg);
11848 j++;
11850 elemsize
11851 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
11852 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11853 build_int_cst (ptype, k * elemsize));
11854 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11855 gimplify_and_add (t, &seq);
11859 return seq;
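/* For the two-part "int b" example above, the sequence built here is
   roughly (illustrative GIMPLE; "b_arr" stands for the simd array
   created for "b"):

     MEM[(vector(4) int *)&b_arr] = b_1;
     MEM[(vector(4) int *)&b_arr + 16B] = b_2;

   i.e. one store per veclen-sized chunk, at byte offset k * elemsize.  */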
11862 /* Callback info for ipa_simd_modify_stmt_ops below. */
11864 struct modify_stmt_info {
11865 ipa_parm_adjustment_vec adjustments;
11866 gimple stmt;
11867 /* True if the parent statement was modified by
11868 ipa_simd_modify_stmt_ops. */
11869 bool modified;
11872 /* Callback for walk_gimple_op.
11874 Adjust operands from a given statement as specified in the
11875 adjustments vector in the callback data. */
11877 static tree
11878 ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
11880 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
11881 struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
11882 tree *orig_tp = tp;
11883 if (TREE_CODE (*tp) == ADDR_EXPR)
11884 tp = &TREE_OPERAND (*tp, 0);
11885 struct ipa_parm_adjustment *cand = NULL;
11886 if (TREE_CODE (*tp) == PARM_DECL)
11887 cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
11888 else
11890 if (TYPE_P (*tp))
11891 *walk_subtrees = 0;
11894 tree repl = NULL_TREE;
11895 if (cand)
11896 repl = unshare_expr (cand->new_decl);
11897 else
11899 if (tp != orig_tp)
11901 *walk_subtrees = 0;
11902 bool modified = info->modified;
11903 info->modified = false;
11904 walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
11905 if (!info->modified)
11907 info->modified = modified;
11908 return NULL_TREE;
11910 info->modified = modified;
11911 repl = *tp;
11913 else
11914 return NULL_TREE;
11917 if (tp != orig_tp)
11919 repl = build_fold_addr_expr (repl);
11920 gimple stmt;
11921 if (is_gimple_debug (info->stmt))
11923 tree vexpr = make_node (DEBUG_EXPR_DECL);
11924 stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
11925 DECL_ARTIFICIAL (vexpr) = 1;
11926 TREE_TYPE (vexpr) = TREE_TYPE (repl);
11927 DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
11928 repl = vexpr;
11930 else
11932 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
11933 repl = gimple_assign_lhs (stmt);
11935 gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
11936 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11937 *orig_tp = repl;
11939 else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
11941 tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
11942 *tp = vce;
11944 else
11945 *tp = repl;
11947 info->modified = true;
11948 return NULL_TREE;
11951 /* Traverse the function body and perform all modifications as
11952 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
11953 modified such that the replacement/reduction value will now be an
11954 offset into the corresponding simd_array.
11956 This function will replace all function argument uses with their
11957 corresponding simd array elements, and adjust the return values
11958 accordingly. */
11960 static void
11961 ipa_simd_modify_function_body (struct cgraph_node *node,
11962 ipa_parm_adjustment_vec adjustments,
11963 tree retval_array, tree iter)
11965 basic_block bb;
11966 unsigned int i, j, l;
11968 /* Re-use the adjustments array, but this time use it to replace
11969 every function argument use to an offset into the corresponding
11970 simd_array. */
11971 for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
11973 if (!node->simdclone->args[i].vector_arg)
11974 continue;
11976 tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
11977 tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
11978 adjustments[j].new_decl
11979 = build4 (ARRAY_REF,
11980 basetype,
11981 node->simdclone->args[i].simd_array,
11982 iter,
11983 NULL_TREE, NULL_TREE);
11984 if (adjustments[j].op == IPA_PARM_OP_NONE
11985 && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
11986 j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
11989 l = adjustments.length ();
11990 for (i = 1; i < num_ssa_names; i++)
11992 tree name = ssa_name (i);
11993 if (name
11994 && SSA_NAME_VAR (name)
11995 && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
11997 for (j = 0; j < l; j++)
11998 if (SSA_NAME_VAR (name) == adjustments[j].base
11999 && adjustments[j].new_decl)
12001 tree base_var;
12002 if (adjustments[j].new_ssa_base == NULL_TREE)
12004 base_var
12005 = copy_var_decl (adjustments[j].base,
12006 DECL_NAME (adjustments[j].base),
12007 TREE_TYPE (adjustments[j].base));
12008 adjustments[j].new_ssa_base = base_var;
12010 else
12011 base_var = adjustments[j].new_ssa_base;
12012 if (SSA_NAME_IS_DEFAULT_DEF (name))
12014 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12015 gimple_stmt_iterator gsi = gsi_after_labels (bb);
12016 tree new_decl = unshare_expr (adjustments[j].new_decl);
12017 set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
12018 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
12019 SSA_NAME_IS_DEFAULT_DEF (name) = 0;
12020 gimple stmt = gimple_build_assign (name, new_decl);
12021 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
12023 else
12024 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
12029 struct modify_stmt_info info;
12030 info.adjustments = adjustments;
12032 FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
12034 gimple_stmt_iterator gsi;
12036 gsi = gsi_start_bb (bb);
12037 while (!gsi_end_p (gsi))
12039 gimple stmt = gsi_stmt (gsi);
12040 info.stmt = stmt;
12041 struct walk_stmt_info wi;
12043 memset (&wi, 0, sizeof (wi));
12044 info.modified = false;
12045 wi.info = &info;
12046 walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
12048 if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
12050 tree retval = gimple_return_retval (return_stmt);
12051 if (!retval)
12053 gsi_remove (&gsi, true);
12054 continue;
12057 /* Replace `return foo' with `retval_array[iter] = foo'. */
12058 tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
12059 retval_array, iter, NULL, NULL);
12060 stmt = gimple_build_assign (ref, retval);
12061 gsi_replace (&gsi, stmt, true);
12062 info.modified = true;
12065 if (info.modified)
12067 update_stmt (stmt);
12068 if (maybe_clean_eh_stmt (stmt))
12069 gimple_purge_dead_eh_edges (gimple_bb (stmt));
12071 gsi_next (&gsi);
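/* After this rewrite, a use of a vector parameter such as

     c_5 = b_2(D) + 1;

   instead reads from the simd array element for the current lane
   (illustrative GIMPLE, reusing the names from the sketches above):

     b_2 = b_arr[iter];
     c_5 = b_2 + 1;

   and a "return x_4;" has become "retval[iter] = x_4;".  */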
12076 /* Adjust the return type and argument types in NODE to their vector
12077 counterparts, and rewrite the body of a definition accordingly. */
12079 static void
12080 simd_clone_adjust (struct cgraph_node *node)
12082 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
12084 targetm.simd_clone.adjust (node);
12086 tree retval = simd_clone_adjust_return_type (node);
12087 ipa_parm_adjustment_vec adjustments
12088 = simd_clone_adjust_argument_types (node);
12090 push_gimplify_context ();
12092 gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
12094 /* Adjust all uses of vector arguments accordingly. Adjust all
12095 return values accordingly. */
12096 tree iter = create_tmp_var (unsigned_type_node, "iter");
12097 tree iter1 = make_ssa_name (iter);
12098 tree iter2 = make_ssa_name (iter);
12099 ipa_simd_modify_function_body (node, adjustments, retval, iter1);
12101 /* Initialize the iteration variable. */
12102 basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12103 basic_block body_bb = split_block_after_labels (entry_bb)->dest;
12104 gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
12105 /* Insert the SIMD array and iv initialization at function
12106 entry. */
12107 gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
12109 pop_gimplify_context (NULL);
12111 /* Create a new BB right before the original exit BB, to hold the
12112 iteration increment and the condition/branch. */
12113 basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
12114 basic_block incr_bb = create_empty_bb (orig_exit);
12115 add_bb_to_loop (incr_bb, body_bb->loop_father);
12116 /* The successor of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with
12117 empty flags. Set it now to be a FALLTHRU_EDGE. */
12118 gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
12119 EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
12120 for (unsigned i = 0;
12121 i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
12123 edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
12124 redirect_edge_succ (e, incr_bb);
12126 edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
12127 e->probability = REG_BR_PROB_BASE;
12128 gsi = gsi_last_bb (incr_bb);
12129 gimple g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
12130 build_int_cst (unsigned_type_node, 1));
12131 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12133 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
12134 struct loop *loop = alloc_loop ();
12135 cfun->has_force_vectorize_loops = true;
12136 loop->safelen = node->simdclone->simdlen;
12137 loop->force_vectorize = true;
12138 loop->header = body_bb;
12140 /* Branch around the body if the mask applies. */
12141 if (node->simdclone->inbranch)
12143 gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
12144 tree mask_array
12145 = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
12146 tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
12147 tree aref = build4 (ARRAY_REF,
12148 TREE_TYPE (TREE_TYPE (mask_array)),
12149 mask_array, iter1,
12150 NULL, NULL);
12151 g = gimple_build_assign (mask, aref);
12152 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12153 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
12154 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
12156 aref = build1 (VIEW_CONVERT_EXPR,
12157 build_nonstandard_integer_type (bitsize, 0), mask);
12158 mask = make_ssa_name (TREE_TYPE (aref));
12159 g = gimple_build_assign (mask, aref);
12160 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12163 g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
12164 NULL, NULL);
12165 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12166 make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
12167 FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
12170 /* Generate the condition. */
12171 g = gimple_build_cond (LT_EXPR,
12172 iter2,
12173 build_int_cst (unsigned_type_node,
12174 node->simdclone->simdlen),
12175 NULL, NULL);
12176 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12177 e = split_block (incr_bb, gsi_stmt (gsi));
12178 basic_block latch_bb = e->dest;
12179 basic_block new_exit_bb;
12180 new_exit_bb = split_block (latch_bb, NULL)->dest;
12181 loop->latch = latch_bb;
12183 redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
12185 make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
12186 /* The successor of incr_bb is already pointing to latch_bb; just
12187 change the flags.
12188 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
12189 FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
12191 gphi *phi = create_phi_node (iter1, body_bb);
12192 edge preheader_edge = find_edge (entry_bb, body_bb);
12193 edge latch_edge = single_succ_edge (latch_bb);
12194 add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
12195 UNKNOWN_LOCATION);
12196 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12198 /* Generate the new return. */
12199 gsi = gsi_last_bb (new_exit_bb);
12200 if (retval
12201 && TREE_CODE (retval) == VIEW_CONVERT_EXPR
12202 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
12203 retval = TREE_OPERAND (retval, 0);
12204 else if (retval)
12206 retval = build1 (VIEW_CONVERT_EXPR,
12207 TREE_TYPE (TREE_TYPE (node->decl)),
12208 retval);
12209 retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
12210 false, GSI_CONTINUE_LINKING);
12212 g = gimple_build_return (retval);
12213 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12215 /* Handle aligned clauses by replacing default defs of the aligned
12216 uniform args with the lhs of a __builtin_assume_aligned (arg_N(D),
12217 alignment) call. Handle linear args by adding PHIs. */
12218 for (unsigned i = 0; i < node->simdclone->nargs; i++)
12219 if (node->simdclone->args[i].alignment
12220 && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
12221 && (node->simdclone->args[i].alignment
12222 & (node->simdclone->args[i].alignment - 1)) == 0
12223 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
12224 == POINTER_TYPE)
12226 unsigned int alignment = node->simdclone->args[i].alignment;
12227 tree orig_arg = node->simdclone->args[i].orig_arg;
12228 tree def = ssa_default_def (cfun, orig_arg);
12229 if (def && !has_zero_uses (def))
12231 tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
12232 gimple_seq seq = NULL;
12233 bool need_cvt = false;
12234 gcall *call
12235 = gimple_build_call (fn, 2, def, size_int (alignment));
12236 g = call;
12237 if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
12238 ptr_type_node))
12239 need_cvt = true;
12240 tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
12241 gimple_call_set_lhs (g, t);
12242 gimple_seq_add_stmt_without_update (&seq, g);
12243 if (need_cvt)
12245 t = make_ssa_name (orig_arg);
12246 g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
12247 gimple_seq_add_stmt_without_update (&seq, g);
12249 gsi_insert_seq_on_edge_immediate
12250 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
12252 entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12253 int freq = compute_call_stmt_bb_frequency (current_function_decl,
12254 entry_bb);
12255 node->create_edge (cgraph_node::get_create (fn),
12256 call, entry_bb->count, freq);
12258 imm_use_iterator iter;
12259 use_operand_p use_p;
12260 gimple use_stmt;
12261 tree repl = gimple_get_lhs (g);
12262 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12263 if (is_gimple_debug (use_stmt) || use_stmt == call)
12264 continue;
12265 else
12266 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12267 SET_USE (use_p, repl);
12270 else if (node->simdclone->args[i].arg_type
12271 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
12273 tree orig_arg = node->simdclone->args[i].orig_arg;
12274 tree def = ssa_default_def (cfun, orig_arg);
12275 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12276 || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
12277 if (def && !has_zero_uses (def))
12279 iter1 = make_ssa_name (orig_arg);
12280 iter2 = make_ssa_name (orig_arg);
12281 phi = create_phi_node (iter1, body_bb);
12282 add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
12283 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12284 enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12285 ? PLUS_EXPR : POINTER_PLUS_EXPR;
12286 tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12287 ? TREE_TYPE (orig_arg) : sizetype;
12288 tree addcst
12289 = build_int_cst (addtype, node->simdclone->args[i].linear_step);
12290 g = gimple_build_assign (iter2, code, iter1, addcst);
12291 gsi = gsi_last_bb (incr_bb);
12292 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
12294 imm_use_iterator iter;
12295 use_operand_p use_p;
12296 gimple use_stmt;
12297 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12298 if (use_stmt == phi)
12299 continue;
12300 else
12301 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12302 SET_USE (use_p, iter1);
12306 calculate_dominance_info (CDI_DOMINATORS);
12307 add_loop (loop, loop->header->loop_father);
12308 update_ssa (TODO_update_ssa);
12310 pop_cfun ();
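/* In outline, the clone body produced by this function is a simdlen-trip
   loop around the original body (a sketch, assuming simdlen 8):

     entry: copy the vector arguments into their simd arrays;
     body:  iter1 = PHI <0 (entry), iter2 (latch)>;
            ... original body, reading arguments as array[iter1] and
                storing results to retval[iter1] ...;
     incr:  iter2 = iter1 + 1;
            if (iter2 < 8) goto latch; else goto exit;
     latch: goto body;
     exit:  return retval (possibly through a VIEW_CONVERT_EXPR);

   marked with safelen == simdlen and force_vectorize so that the loop
   vectorizer can later turn it back into straight-line vector code.  */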
12313 /* If the function in NODE is tagged as an elemental SIMD function,
12314 create the appropriate SIMD clones. */
12316 static void
12317 expand_simd_clones (struct cgraph_node *node)
12319 tree attr = lookup_attribute ("omp declare simd",
12320 DECL_ATTRIBUTES (node->decl));
12321 if (attr == NULL_TREE
12322 || node->global.inlined_to
12323 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
12324 return;
12326 /* Ignore
12327 #pragma omp declare simd
12328 extern int foo ();
12329 in C, where we don't know the argument types at all. */
12330 if (!node->definition
12331 && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
12332 return;
12336 /* Start with parsing the "omp declare simd" attribute(s). */
12337 bool inbranch_clause_specified;
12338 struct cgraph_simd_clone *clone_info
12339 = simd_clone_clauses_extract (node, TREE_VALUE (attr),
12340 &inbranch_clause_specified);
12341 if (clone_info == NULL)
12342 continue;
12344 int orig_simdlen = clone_info->simdlen;
12345 tree base_type = simd_clone_compute_base_data_type (node, clone_info);
12346 /* The target can return 0 (no simd clones should be created),
12347 1 (just one ISA of simd clones should be created) or a higher
12348 count of ISA variants. In the latter case, clone_info is initialized
12349 for the first ISA variant. */
12350 int count
12351 = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
12352 base_type, 0);
12353 if (count == 0)
12354 continue;
12356 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
12357 also create one inbranch and one !inbranch clone of it. */
12358 for (int i = 0; i < count * 2; i++)
12360 struct cgraph_simd_clone *clone = clone_info;
12361 if (inbranch_clause_specified && (i & 1) != 0)
12362 continue;
12364 if (i != 0)
12366 clone = simd_clone_struct_alloc (clone_info->nargs
12367 + ((i & 1) != 0));
12368 simd_clone_struct_copy (clone, clone_info);
12369 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
12370 and simd_clone_adjust_argument_types did to the first
12371 clone's info. */
12372 clone->nargs -= clone_info->inbranch;
12373 clone->simdlen = orig_simdlen;
12374 /* And call the target hook again to get the right ISA. */
12375 targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
12376 base_type,
12377 i / 2);
12378 if ((i & 1) != 0)
12379 clone->inbranch = 1;
12382 /* simd_clone_mangle might fail if such a clone has been created
12383 already. */
12384 tree id = simd_clone_mangle (node, clone);
12385 if (id == NULL_TREE)
12386 continue;
12388 /* Only when we are sure we want to create the clone do we actually
12389 clone the function (for definitions) or create another
12390 extern FUNCTION_DECL (for prototypes without definitions). */
12391 struct cgraph_node *n = simd_clone_create (node);
12392 if (n == NULL)
12393 continue;
12395 n->simdclone = clone;
12396 clone->origin = node;
12397 clone->next_clone = NULL;
12398 if (node->simd_clones == NULL)
12400 clone->prev_clone = n;
12401 node->simd_clones = n;
12403 else
12405 clone->prev_clone = node->simd_clones->simdclone->prev_clone;
12406 clone->prev_clone->simdclone->next_clone = n;
12407 node->simd_clones->simdclone->prev_clone = n;
12409 symtab->change_decl_assembler_name (n->decl, id);
12410 /* And finally adjust the return type, parameters and for
12411 definitions also function body. */
12412 if (node->definition)
12413 simd_clone_adjust (n);
12414 else
12416 simd_clone_adjust_return_type (n);
12417 simd_clone_adjust_argument_types (n);
12421 while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
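/* So, for example (illustrative), a definition such as

     #pragma omp declare simd
     int foo (int x) { return x + 1; }

   with neither inbranch nor notinbranch specified yields, for every ISA
   variant the target hook requests, both an unmasked and a masked clone
   (the i & 1 distinction above), each mangled and adjusted
   separately.  */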
12424 /* Entry point for IPA simd clone creation pass. */
12426 static unsigned int
12427 ipa_omp_simd_clone (void)
12429 struct cgraph_node *node;
12430 FOR_EACH_FUNCTION (node)
12431 expand_simd_clones (node);
12432 return 0;
12435 namespace {
12437 const pass_data pass_data_omp_simd_clone =
12439 SIMPLE_IPA_PASS, /* type */
12440 "simdclone", /* name */
12441 OPTGROUP_NONE, /* optinfo_flags */
12442 TV_NONE, /* tv_id */
12443 ( PROP_ssa | PROP_cfg ), /* properties_required */
12444 0, /* properties_provided */
12445 0, /* properties_destroyed */
12446 0, /* todo_flags_start */
12447 0, /* todo_flags_finish */
12450 class pass_omp_simd_clone : public simple_ipa_opt_pass
12452 public:
12453 pass_omp_simd_clone(gcc::context *ctxt)
12454 : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
12457 /* opt_pass methods: */
12458 virtual bool gate (function *);
12459 virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
12462 bool
12463 pass_omp_simd_clone::gate (function *)
12465 return ((flag_openmp || flag_openmp_simd
12466 || flag_cilkplus
12467 || (in_lto_p && !flag_wpa))
12468 && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
12471 } // anon namespace
12473 simple_ipa_opt_pass *
12474 make_pass_omp_simd_clone (gcc::context *ctxt)
12476 return new pass_omp_simd_clone (ctxt);
12479 /* Helper function for the omp_finish_file routine. Takes decls from
12480 V_DECLS and adds their addresses and sizes to constructor-vector V_CTOR. */
12481 static void
12482 add_decls_addresses_to_decl_constructor (vec<tree, va_gc> *v_decls,
12483 vec<constructor_elt, va_gc> *v_ctor)
12485 unsigned len = vec_safe_length (v_decls);
12486 for (unsigned i = 0; i < len; i++)
12488 tree it = (*v_decls)[i];
12489 bool is_function = TREE_CODE (it) != VAR_DECL;
12491 CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE, build_fold_addr_expr (it));
12492 if (!is_function)
12493 CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE,
12494 fold_convert (const_ptr_type_node,
12495 DECL_SIZE_UNIT (it)));
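/* The tables built from these constructors are flat arrays of
   pointer-sized entries; e.g. for two variables and one function
   (illustrative):

     .offload_var_table:  &var1, sizeof (var1), &var2, sizeof (var2)
     .offload_func_table: &fn1

   which is why omp_finish_file below allocates num_vars * 2 elements
   for the variable table.  */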
12499 /* Create new symbols containing (address, size) pairs for global
12500 variables marked with the "omp declare target" attribute, as well as
12501 addresses of the functions that are outlined target regions. */
12502 void
12503 omp_finish_file (void)
12505 unsigned num_funcs = vec_safe_length (offload_funcs);
12506 unsigned num_vars = vec_safe_length (offload_vars);
12508 if (num_funcs == 0 && num_vars == 0)
12509 return;
12511 if (targetm_common.have_named_sections)
12513 vec<constructor_elt, va_gc> *v_f, *v_v;
12514 vec_alloc (v_f, num_funcs);
12515 vec_alloc (v_v, num_vars * 2);
12517 add_decls_addresses_to_decl_constructor (offload_funcs, v_f);
12518 add_decls_addresses_to_decl_constructor (offload_vars, v_v);
12520 tree vars_decl_type = build_array_type_nelts (pointer_sized_int_node,
12521 num_vars * 2);
12522 tree funcs_decl_type = build_array_type_nelts (pointer_sized_int_node,
12523 num_funcs);
12524 TYPE_ALIGN (vars_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
12525 TYPE_ALIGN (funcs_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
12526 tree ctor_v = build_constructor (vars_decl_type, v_v);
12527 tree ctor_f = build_constructor (funcs_decl_type, v_f);
12528 TREE_CONSTANT (ctor_v) = TREE_CONSTANT (ctor_f) = 1;
12529 TREE_STATIC (ctor_v) = TREE_STATIC (ctor_f) = 1;
12530 tree funcs_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
12531 get_identifier (".offload_func_table"),
12532 funcs_decl_type);
12533 tree vars_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
12534 get_identifier (".offload_var_table"),
12535 vars_decl_type);
12536 TREE_STATIC (funcs_decl) = TREE_STATIC (vars_decl) = 1;
12537 /* Do not align tables more than TYPE_ALIGN (pointer_sized_int_node),
12538 otherwise a joint table in a binary will contain padding between
12539 tables from multiple object files. */
12540 DECL_USER_ALIGN (funcs_decl) = DECL_USER_ALIGN (vars_decl) = 1;
12541 DECL_ALIGN (funcs_decl) = TYPE_ALIGN (funcs_decl_type);
12542 DECL_ALIGN (vars_decl) = TYPE_ALIGN (vars_decl_type);
12543 DECL_INITIAL (funcs_decl) = ctor_f;
12544 DECL_INITIAL (vars_decl) = ctor_v;
12545 set_decl_section_name (funcs_decl, OFFLOAD_FUNC_TABLE_SECTION_NAME);
12546 set_decl_section_name (vars_decl, OFFLOAD_VAR_TABLE_SECTION_NAME);
12548 varpool_node::finalize_decl (vars_decl);
12549 varpool_node::finalize_decl (funcs_decl);
12551 else
12553 for (unsigned i = 0; i < num_funcs; i++)
12555 tree it = (*offload_funcs)[i];
12556 targetm.record_offload_symbol (it);
12558 for (unsigned i = 0; i < num_vars; i++)
12560 tree it = (*offload_vars)[i];
12561 targetm.record_offload_symbol (it);
12566 #include "gt-omp-low.h"