PR 61187 Fix use of uninitialized memory.
[official-gcc.git] / gcc / omp-low.c
blob a2a64ad1dcd7a047b8b3b2f786acb4f164bbf3e3
/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "rtl.h"
#include "pointer-set.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
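
/* As a conceptual sketch (not the exact generated code), a construct
   such as:

	#pragma omp parallel shared (n)
	  body;

   is outlined into a child function invoked through libgomp, roughly:

	void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
	{
	  ... body, with n accessed as .omp_data_i->n ...
	}

	.omp_data_o.n = n;
	GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   The .omp_data_s record and the child function decl are built by the
   scanning code below; the GOMP_* call itself is emitted later by
   pass_expand_omp.  */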
/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump during the omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
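
/* Typical use (a sketch of the idiom as it appears elsewhere in this
   file):

     tree c = find_omp_clause (gimple_omp_parallel_clauses (stmt),
			       OMP_CLAUSE_COPYIN);
     if (c == NULL)
       ... the directive carries no copyin clause ...  */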
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}

/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_KIND_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD);
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}
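
      /* An explanatory note on the computation below (the code is
	 unchanged): the per-loop trip count works out to

	     (n2 - n1 + step + (cond_code == LT_EXPR ? -1 : 1)) / step

	 i.e. a division rounded so that partial strides count as one
	 iteration.  E.g. for (i = 0; i < 10; i += 3):
	 (10 - 0 + 3 - 1) / 3 = 4 iterations (i = 0, 3, 6, 9).  */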
      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */
static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (ws_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (ws_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
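
/* An informal note for orientation: the bounds, step and optional chunk
   size collected above become the extra arguments of the combined
   GOMP_parallel_loop_* libgomp entry points, which is why they must be
   computable at the parallel's entry (see workshare_safe_to_combine_p
   above).  */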
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
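
/* An informal recap of the predicate above: aggregates, statics,
   value-expr and address-taken variables always go by pointer;
   read-only scalars and by-reference RESULT/PARM decls are copy-in
   only; and anything shared into a task that survives the earlier
   checks is passed by pointer as well, since the task may outlive
   the GOMP_task call.  */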
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
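/* A note on MASK (an inferred reading of the code below, not a formal
   contract): bit 0 (value 1) installs a field in the receiver record
   CTX->RECORD_TYPE, bit 1 (value 2) installs one in the sender record
   CTX->SRECORD_TYPE used by task constructs, and bit 2 (value 4) marks
   an array passed via a pointer to a pointer.  Callers use 3 for the
   common "both records" case and 7 for mapped arrays.  */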
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for omp child function.  Returns an identifier.  */

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}
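
/* For a containing function foo this yields identifiers of the form
   foo._omp_fn.0, foo._omp_fn.1, ... (or foo._omp_cpyfn.N for task copy
   functions); compare the bar.omp_fn.0 example earlier in this file.  */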
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  bool target_p = false;
  if (lookup_attribute ("omp declare target",
			DECL_ATTRIBUTES (current_function_decl)))
    target_p = true;
  else
    {
      omp_context *octx;
      for (octx = ctx; octx; octx = octx->outer)
	if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
	    && gimple_omp_target_kind (octx->stmt)
	       == GF_OMP_TARGET_KIND_REGION)
	  {
	    target_p = true;
	    break;
	  }
    }
  if (target_p)
    DECL_ATTRIBUTES (decl)
      = tree_cons (get_identifier ("omp declare target"),
		   NULL_TREE, DECL_ATTRIBUTES (decl));

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
/* Callback for walk_gimple_seq.  Check if combined parallel
   contains gimple_omp_for_combined_into_p OMP_FOR.  */

static tree
find_combined_for (gimple_stmt_iterator *gsi_p,
		   bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_combined_into_p (stmt)
	  && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
	{
	  wi->info = stmt;
	  return integer_zero_node;
	}
      break;
    default:
      break;
    }
  return NULL;
}
1959 /* Scan an OpenMP parallel directive. */
1961 static void
1962 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1964 omp_context *ctx;
1965 tree name;
1966 gimple stmt = gsi_stmt (*gsi);
1968 /* Ignore parallel directives with empty bodies, unless there
1969 are copyin clauses. */
1970 if (optimize > 0
1971 && empty_body_p (gimple_omp_body (stmt))
1972 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1973 OMP_CLAUSE_COPYIN) == NULL)
1975 gsi_replace (gsi, gimple_build_nop (), false);
1976 return;
1979 if (gimple_omp_parallel_combined_p (stmt))
1981 gimple for_stmt;
1982 struct walk_stmt_info wi;
1984 memset (&wi, 0, sizeof (wi));
1985 wi.val_only = true;
1986 walk_gimple_seq (gimple_omp_body (stmt),
1987 find_combined_for, NULL, &wi);
1988 for_stmt = (gimple) wi.info;
1989 if (for_stmt)
1991 struct omp_for_data fd;
1992 extract_omp_for_data (for_stmt, &fd, NULL);
1993 /* We need two temporaries with fd.loop.v type (istart/iend)
1994 and then (fd.collapse - 1) temporaries with the same
1995 type for count2 ... countN-1 vars if not constant. */
1996 size_t count = 2, i;
1997 tree type = fd.iter_type;
1998 if (fd.collapse > 1
1999 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2000 count += fd.collapse - 1;
2001 for (i = 0; i < count; i++)
2003 tree temp = create_tmp_var (type, NULL);
2004 tree c = build_omp_clause (UNKNOWN_LOCATION,
2005 OMP_CLAUSE__LOOPTEMP_);
2006 OMP_CLAUSE_DECL (c) = temp;
2007 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2008 gimple_omp_parallel_set_clauses (stmt, c);
2013 ctx = new_omp_context (stmt, outer_ctx);
2014 if (taskreg_nesting_level > 1)
2015 ctx->is_nested = true;
2016 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2017 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2018 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2019 name = create_tmp_var_name (".omp_data_s");
2020 name = build_decl (gimple_location (stmt),
2021 TYPE_DECL, name, ctx->record_type);
2022 DECL_ARTIFICIAL (name) = 1;
2023 DECL_NAMELESS (name) = 1;
2024 TYPE_NAME (ctx->record_type) = name;
2025 create_omp_child_function (ctx, false);
2026 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2028 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2029 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2031 if (TYPE_FIELDS (ctx->record_type) == NULL)
2032 ctx->record_type = ctx->receiver_decl = NULL;
2033 else
2035 layout_type (ctx->record_type);
2036 fixup_child_record_type (ctx);
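/* Illustrative input for the combined case handled above:

     #pragma omp parallel for
     for (i = 0; i < n; i++)
       body;

   The _looptemp_ clauses added to the parallel give the inner
   GIMPLE_OMP_FOR somewhere to receive istart/iend (plus the extra
   count temporaries for collapsed loops with non-constant bounds). */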
2040 /* Scan an OpenMP task directive. */
2042 static void
2043 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2045 omp_context *ctx;
2046 tree name, t;
2047 gimple stmt = gsi_stmt (*gsi);
2048 location_t loc = gimple_location (stmt);
2050 /* Ignore task directives with empty bodies. */
2051 if (optimize > 0
2052 && empty_body_p (gimple_omp_body (stmt)))
2054 gsi_replace (gsi, gimple_build_nop (), false);
2055 return;
2058 ctx = new_omp_context (stmt, outer_ctx);
2059 if (taskreg_nesting_level > 1)
2060 ctx->is_nested = true;
2061 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2062 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2063 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2064 name = create_tmp_var_name (".omp_data_s");
2065 name = build_decl (gimple_location (stmt),
2066 TYPE_DECL, name, ctx->record_type);
2067 DECL_ARTIFICIAL (name) = 1;
2068 DECL_NAMELESS (name) = 1;
2069 TYPE_NAME (ctx->record_type) = name;
2070 create_omp_child_function (ctx, false);
2071 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2073 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2075 if (ctx->srecord_type)
2077 name = create_tmp_var_name (".omp_data_a");
2078 name = build_decl (gimple_location (stmt),
2079 TYPE_DECL, name, ctx->srecord_type);
2080 DECL_ARTIFICIAL (name) = 1;
2081 DECL_NAMELESS (name) = 1;
2082 TYPE_NAME (ctx->srecord_type) = name;
2083 create_omp_child_function (ctx, true);
2086 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2088 if (TYPE_FIELDS (ctx->record_type) == NULL)
2090 ctx->record_type = ctx->receiver_decl = NULL;
2091 t = build_int_cst (long_integer_type_node, 0);
2092 gimple_omp_task_set_arg_size (stmt, t);
2093 t = build_int_cst (long_integer_type_node, 1);
2094 gimple_omp_task_set_arg_align (stmt, t);
2096 else
2098 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2099 /* Move VLA fields to the end. */
2100 p = &TYPE_FIELDS (ctx->record_type);
2101 while (*p)
2102 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2103 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2105 *q = *p;
2106 *p = TREE_CHAIN (*p);
2107 TREE_CHAIN (*q) = NULL_TREE;
2108 q = &TREE_CHAIN (*q);
2110 else
2111 p = &DECL_CHAIN (*p);
2112 *p = vla_fields;
2113 layout_type (ctx->record_type);
2114 fixup_child_record_type (ctx);
2115 if (ctx->srecord_type)
2116 layout_type (ctx->srecord_type);
2117 t = fold_convert_loc (loc, long_integer_type_node,
2118 TYPE_SIZE_UNIT (ctx->record_type));
2119 gimple_omp_task_set_arg_size (stmt, t);
2120 t = build_int_cst (long_integer_type_node,
2121 TYPE_ALIGN_UNIT (ctx->record_type));
2122 gimple_omp_task_set_arg_align (stmt, t);
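/* Net effect, sketched: an empty capture record yields arg_size 0 and
   arg_align 1 for the runtime task call, while a non-empty one passes
   the size and alignment of the laid-out .omp_data_s record, with
   variable-sized fields moved to its end so the VLA data can follow
   the fixed part (parameter naming here is illustrative). */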
2127 /* Scan an OpenMP loop directive. */
2129 static void
2130 scan_omp_for (gimple stmt, omp_context *outer_ctx)
2132 omp_context *ctx;
2133 size_t i;
2135 ctx = new_omp_context (stmt, outer_ctx);
2137 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2139 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2140 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2142 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2143 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2144 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2145 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2147 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2150 /* Scan an OpenMP sections directive. */
2152 static void
2153 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
2155 omp_context *ctx;
2157 ctx = new_omp_context (stmt, outer_ctx);
2158 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2159 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2162 /* Scan an OpenMP single directive. */
2164 static void
2165 scan_omp_single (gimple stmt, omp_context *outer_ctx)
2167 omp_context *ctx;
2168 tree name;
2170 ctx = new_omp_context (stmt, outer_ctx);
2171 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2172 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2173 name = create_tmp_var_name (".omp_copy_s");
2174 name = build_decl (gimple_location (stmt),
2175 TYPE_DECL, name, ctx->record_type);
2176 TYPE_NAME (ctx->record_type) = name;
2178 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2179 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2181 if (TYPE_FIELDS (ctx->record_type) == NULL)
2182 ctx->record_type = NULL;
2183 else
2184 layout_type (ctx->record_type);
2187 /* Scan an OpenMP target{, data, update} directive. */
2189 static void
2190 scan_omp_target (gimple stmt, omp_context *outer_ctx)
2192 omp_context *ctx;
2193 tree name;
2194 int kind = gimple_omp_target_kind (stmt);
2196 ctx = new_omp_context (stmt, outer_ctx);
2197 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2198 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2199 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2200 name = create_tmp_var_name (".omp_data_t");
2201 name = build_decl (gimple_location (stmt),
2202 TYPE_DECL, name, ctx->record_type);
2203 DECL_ARTIFICIAL (name) = 1;
2204 DECL_NAMELESS (name) = 1;
2205 TYPE_NAME (ctx->record_type) = name;
2206 if (kind == GF_OMP_TARGET_KIND_REGION)
2208 create_omp_child_function (ctx, false);
2209 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2212 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2213 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2215 if (TYPE_FIELDS (ctx->record_type) == NULL)
2216 ctx->record_type = ctx->receiver_decl = NULL;
2217 else
2219 TYPE_FIELDS (ctx->record_type)
2220 = nreverse (TYPE_FIELDS (ctx->record_type));
2221 #ifdef ENABLE_CHECKING
2222 tree field;
2223 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2224 for (field = TYPE_FIELDS (ctx->record_type);
2225 field;
2226 field = DECL_CHAIN (field))
2227 gcc_assert (DECL_ALIGN (field) == align);
2228 #endif
2229 layout_type (ctx->record_type);
2230 if (kind == GF_OMP_TARGET_KIND_REGION)
2231 fixup_child_record_type (ctx);
2235 /* Scan an OpenMP teams directive. */
2237 static void
2238 scan_omp_teams (gimple stmt, omp_context *outer_ctx)
2240 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2241 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2242 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2245 /* Check OpenMP nesting restrictions. */
2246 static bool
2247 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2249 if (ctx != NULL)
2251 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2252 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
2254 error_at (gimple_location (stmt),
2255 "OpenMP constructs may not be nested inside simd region");
2256 return false;
2258 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2260 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2261 || (gimple_omp_for_kind (stmt)
2262 != GF_OMP_FOR_KIND_DISTRIBUTE))
2263 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2265 error_at (gimple_location (stmt),
2266 "only distribute or parallel constructs are allowed to "
2267 "be closely nested inside teams construct");
2268 return false;
2272 switch (gimple_code (stmt))
2274 case GIMPLE_OMP_FOR:
2275 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_KIND_SIMD)
2276 return true;
2277 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2279 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2281 error_at (gimple_location (stmt),
2282 "distribute construct must be closely nested inside "
2283 "teams construct");
2284 return false;
2286 return true;
2288 /* FALLTHRU */
2289 case GIMPLE_CALL:
2290 if (is_gimple_call (stmt)
2291 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2292 == BUILT_IN_GOMP_CANCEL
2293 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2294 == BUILT_IN_GOMP_CANCELLATION_POINT))
2296 const char *bad = NULL;
2297 const char *kind = NULL;
2298 if (ctx == NULL)
2300 error_at (gimple_location (stmt), "orphaned %qs construct",
2301 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2302 == BUILT_IN_GOMP_CANCEL
2303 ? "#pragma omp cancel"
2304 : "#pragma omp cancellation point");
2305 return false;
2307 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2308 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2309 : 0)
2311 case 1:
2312 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2313 bad = "#pragma omp parallel";
2314 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2315 == BUILT_IN_GOMP_CANCEL
2316 && !integer_zerop (gimple_call_arg (stmt, 1)))
2317 ctx->cancellable = true;
2318 kind = "parallel";
2319 break;
2320 case 2:
2321 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2322 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2323 bad = "#pragma omp for";
2324 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2325 == BUILT_IN_GOMP_CANCEL
2326 && !integer_zerop (gimple_call_arg (stmt, 1)))
2328 ctx->cancellable = true;
2329 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2330 OMP_CLAUSE_NOWAIT))
2331 warning_at (gimple_location (stmt), 0,
2332 "%<#pragma omp cancel for%> inside "
2333 "%<nowait%> for construct");
2334 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2335 OMP_CLAUSE_ORDERED))
2336 warning_at (gimple_location (stmt), 0,
2337 "%<#pragma omp cancel for%> inside "
2338 "%<ordered%> for construct");
2340 kind = "for";
2341 break;
2342 case 4:
2343 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2344 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2345 bad = "#pragma omp sections";
2346 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2347 == BUILT_IN_GOMP_CANCEL
2348 && !integer_zerop (gimple_call_arg (stmt, 1)))
2350 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2352 ctx->cancellable = true;
2353 if (find_omp_clause (gimple_omp_sections_clauses
2354 (ctx->stmt),
2355 OMP_CLAUSE_NOWAIT))
2356 warning_at (gimple_location (stmt), 0,
2357 "%<#pragma omp cancel sections%> inside "
2358 "%<nowait%> sections construct");
2360 else
2362 gcc_assert (ctx->outer
2363 && gimple_code (ctx->outer->stmt)
2364 == GIMPLE_OMP_SECTIONS);
2365 ctx->outer->cancellable = true;
2366 if (find_omp_clause (gimple_omp_sections_clauses
2367 (ctx->outer->stmt),
2368 OMP_CLAUSE_NOWAIT))
2369 warning_at (gimple_location (stmt), 0,
2370 "%<#pragma omp cancel sections%> inside "
2371 "%<nowait%> sections construct");
2374 kind = "sections";
2375 break;
2376 case 8:
2377 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2378 bad = "#pragma omp task";
2379 else
2380 ctx->cancellable = true;
2381 kind = "taskgroup";
2382 break;
2383 default:
2384 error_at (gimple_location (stmt), "invalid arguments");
2385 return false;
2387 if (bad)
2389 error_at (gimple_location (stmt),
2390 "%<%s %s%> construct not closely nested inside of %qs",
2391 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2392 == BUILT_IN_GOMP_CANCEL
2393 ? "#pragma omp cancel"
2394 : "#pragma omp cancellation point", kind, bad);
2395 return false;
2398 /* FALLTHRU */
2399 case GIMPLE_OMP_SECTIONS:
2400 case GIMPLE_OMP_SINGLE:
2401 for (; ctx != NULL; ctx = ctx->outer)
2402 switch (gimple_code (ctx->stmt))
2404 case GIMPLE_OMP_FOR:
2405 case GIMPLE_OMP_SECTIONS:
2406 case GIMPLE_OMP_SINGLE:
2407 case GIMPLE_OMP_ORDERED:
2408 case GIMPLE_OMP_MASTER:
2409 case GIMPLE_OMP_TASK:
2410 case GIMPLE_OMP_CRITICAL:
2411 if (is_gimple_call (stmt))
2413 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2414 != BUILT_IN_GOMP_BARRIER)
2415 return true;
2416 error_at (gimple_location (stmt),
2417 "barrier region may not be closely nested inside "
2418 "of work-sharing, critical, ordered, master or "
2419 "explicit task region");
2420 return false;
2422 error_at (gimple_location (stmt),
2423 "work-sharing region may not be closely nested inside "
2424 "of work-sharing, critical, ordered, master or explicit "
2425 "task region");
2426 return false;
2427 case GIMPLE_OMP_PARALLEL:
2428 return true;
2429 default:
2430 break;
2432 break;
2433 case GIMPLE_OMP_MASTER:
2434 for (; ctx != NULL; ctx = ctx->outer)
2435 switch (gimple_code (ctx->stmt))
2437 case GIMPLE_OMP_FOR:
2438 case GIMPLE_OMP_SECTIONS:
2439 case GIMPLE_OMP_SINGLE:
2440 case GIMPLE_OMP_TASK:
2441 error_at (gimple_location (stmt),
2442 "master region may not be closely nested inside "
2443 "of work-sharing or explicit task region");
2444 return false;
2445 case GIMPLE_OMP_PARALLEL:
2446 return true;
2447 default:
2448 break;
2450 break;
2451 case GIMPLE_OMP_ORDERED:
2452 for (; ctx != NULL; ctx = ctx->outer)
2453 switch (gimple_code (ctx->stmt))
2455 case GIMPLE_OMP_CRITICAL:
2456 case GIMPLE_OMP_TASK:
2457 error_at (gimple_location (stmt),
2458 "ordered region may not be closely nested inside "
2459 "of critical or explicit task region");
2460 return false;
2461 case GIMPLE_OMP_FOR:
2462 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2463 OMP_CLAUSE_ORDERED) == NULL)
2465 error_at (gimple_location (stmt),
2466 "ordered region must be closely nested inside "
2467 "a loop region with an ordered clause");
2468 return false;
2470 return true;
2471 case GIMPLE_OMP_PARALLEL:
2472 error_at (gimple_location (stmt),
2473 "ordered region must be closely nested inside "
2474 "a loop region with an ordered clause");
2475 return false;
2476 default:
2477 break;
2479 break;
2480 case GIMPLE_OMP_CRITICAL:
2481 for (; ctx != NULL; ctx = ctx->outer)
2482 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
2483 && (gimple_omp_critical_name (stmt)
2484 == gimple_omp_critical_name (ctx->stmt)))
2486 error_at (gimple_location (stmt),
2487 "critical region may not be nested inside a critical "
2488 "region with the same name");
2489 return false;
2491 break;
2492 case GIMPLE_OMP_TEAMS:
2493 if (ctx == NULL
2494 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2495 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2497 error_at (gimple_location (stmt),
2498 "teams construct not closely nested inside of target "
2499 "region");
2500 return false;
2502 break;
2503 default:
2504 break;
2506 return true;
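/* Example of a nesting the checks above reject:

     #pragma omp for
     for (i = 0; i < n; i++)
       {
       #pragma omp single
         f (i);
       }

   which triggers "work-sharing region may not be closely nested inside
   of work-sharing, critical, ordered, master or explicit task region";
   wrapping the inner construct in its own parallel region makes the
   nesting valid again. */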
2510 /* Helper function for scan_omp.
2512 Callback for walk_tree, or for operands in walk_gimple_stmt, used to
2513 scan for OpenMP directives in TP. */
2515 static tree
2516 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2518 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2519 omp_context *ctx = (omp_context *) wi->info;
2520 tree t = *tp;
2522 switch (TREE_CODE (t))
2524 case VAR_DECL:
2525 case PARM_DECL:
2526 case LABEL_DECL:
2527 case RESULT_DECL:
2528 if (ctx)
2529 *tp = remap_decl (t, &ctx->cb);
2530 break;
2532 default:
2533 if (ctx && TYPE_P (t))
2534 *tp = remap_type (t, &ctx->cb);
2535 else if (!DECL_P (t))
2537 *walk_subtrees = 1;
2538 if (ctx)
2540 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2541 if (tem != TREE_TYPE (t))
2543 if (TREE_CODE (t) == INTEGER_CST)
2544 *tp = wide_int_to_tree (tem, t);
2545 else
2546 TREE_TYPE (t) = tem;
2550 break;
2553 return NULL_TREE;
2556 /* Return true if FNDECL is a setjmp or a longjmp. */
2558 static bool
2559 setjmp_or_longjmp_p (const_tree fndecl)
2561 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2562 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2563 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2564 return true;
2566 tree declname = DECL_NAME (fndecl);
2567 if (!declname)
2568 return false;
2569 const char *name = IDENTIFIER_POINTER (declname);
2570 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2574 /* Helper function for scan_omp.
2576 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2577 the current statement in GSI. */
2579 static tree
2580 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2581 struct walk_stmt_info *wi)
2583 gimple stmt = gsi_stmt (*gsi);
2584 omp_context *ctx = (omp_context *) wi->info;
2586 if (gimple_has_location (stmt))
2587 input_location = gimple_location (stmt);
2589 /* Check the OpenMP nesting restrictions. */
2590 bool remove = false;
2591 if (is_gimple_omp (stmt))
2592 remove = !check_omp_nesting_restrictions (stmt, ctx);
2593 else if (is_gimple_call (stmt))
2595 tree fndecl = gimple_call_fndecl (stmt);
2596 if (fndecl)
2598 if (setjmp_or_longjmp_p (fndecl)
2599 && ctx
2600 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2601 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
2603 remove = true;
2604 error_at (gimple_location (stmt),
2605 "setjmp/longjmp inside simd construct");
2607 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2608 switch (DECL_FUNCTION_CODE (fndecl))
2610 case BUILT_IN_GOMP_BARRIER:
2611 case BUILT_IN_GOMP_CANCEL:
2612 case BUILT_IN_GOMP_CANCELLATION_POINT:
2613 case BUILT_IN_GOMP_TASKYIELD:
2614 case BUILT_IN_GOMP_TASKWAIT:
2615 case BUILT_IN_GOMP_TASKGROUP_START:
2616 case BUILT_IN_GOMP_TASKGROUP_END:
2617 remove = !check_omp_nesting_restrictions (stmt, ctx);
2618 break;
2619 default:
2620 break;
2624 if (remove)
2626 stmt = gimple_build_nop ();
2627 gsi_replace (gsi, stmt, false);
2630 *handled_ops_p = true;
2632 switch (gimple_code (stmt))
2634 case GIMPLE_OMP_PARALLEL:
2635 taskreg_nesting_level++;
2636 scan_omp_parallel (gsi, ctx);
2637 taskreg_nesting_level--;
2638 break;
2640 case GIMPLE_OMP_TASK:
2641 taskreg_nesting_level++;
2642 scan_omp_task (gsi, ctx);
2643 taskreg_nesting_level--;
2644 break;
2646 case GIMPLE_OMP_FOR:
2647 scan_omp_for (stmt, ctx);
2648 break;
2650 case GIMPLE_OMP_SECTIONS:
2651 scan_omp_sections (stmt, ctx);
2652 break;
2654 case GIMPLE_OMP_SINGLE:
2655 scan_omp_single (stmt, ctx);
2656 break;
2658 case GIMPLE_OMP_SECTION:
2659 case GIMPLE_OMP_MASTER:
2660 case GIMPLE_OMP_TASKGROUP:
2661 case GIMPLE_OMP_ORDERED:
2662 case GIMPLE_OMP_CRITICAL:
2663 ctx = new_omp_context (stmt, ctx);
2664 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2665 break;
2667 case GIMPLE_OMP_TARGET:
2668 scan_omp_target (stmt, ctx);
2669 break;
2671 case GIMPLE_OMP_TEAMS:
2672 scan_omp_teams (stmt, ctx);
2673 break;
2675 case GIMPLE_BIND:
2677 tree var;
2679 *handled_ops_p = false;
2680 if (ctx)
2681 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2682 insert_decl_map (&ctx->cb, var, var);
2684 break;
2685 default:
2686 *handled_ops_p = false;
2687 break;
2690 return NULL_TREE;
2694 /* Scan all the statements starting at the current statement. CTX
2695 contains context information about the OpenMP directives and
2696 clauses found during the scan. */
2698 static void
2699 scan_omp (gimple_seq *body_p, omp_context *ctx)
2701 location_t saved_location;
2702 struct walk_stmt_info wi;
2704 memset (&wi, 0, sizeof (wi));
2705 wi.info = ctx;
2706 wi.want_locations = true;
2708 saved_location = input_location;
2709 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2710 input_location = saved_location;
2713 /* Re-gimplification and code generation routines. */
2715 /* Build a call to GOMP_barrier. */
2717 static gimple
2718 build_omp_barrier (tree lhs)
2720 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2721 : BUILT_IN_GOMP_BARRIER);
2722 gimple g = gimple_build_call (fndecl, 0);
2723 if (lhs)
2724 gimple_call_set_lhs (g, lhs);
2725 return g;
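/* Conceptually this emits either "GOMP_barrier ()" or, when LHS is
   given, "lhs = GOMP_barrier_cancel ()", where the result tells the
   caller whether cancellation has been requested (a sketch; the exact
   semantics live in the libgomp runtime). */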
2728 /* If a context was created for STMT when it was scanned, return it. */
2730 static omp_context *
2731 maybe_lookup_ctx (gimple stmt)
2733 splay_tree_node n;
2734 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2735 return n ? (omp_context *) n->value : NULL;
2739 /* Find the mapping for DECL in CTX or the immediately enclosing
2740 context that has a mapping for DECL.
2742 If CTX is a nested parallel directive, we may have to use the decl
2743 mappings created in CTX's parent context. Suppose that we have the
2744 following parallel nesting (variable UIDs shown for clarity):
2746 iD.1562 = 0;
2747 #omp parallel shared(iD.1562) -> outer parallel
2748 iD.1562 = iD.1562 + 1;
2750 #omp parallel shared (iD.1562) -> inner parallel
2751 iD.1562 = iD.1562 - 1;
2753 Each parallel structure will create a distinct .omp_data_s structure
2754 for copying iD.1562 in/out of the directive:
2756 outer parallel .omp_data_s.1.i -> iD.1562
2757 inner parallel .omp_data_s.2.i -> iD.1562
2759 A shared variable mapping will produce a copy-out operation before
2760 the parallel directive and a copy-in operation after it. So, in
2761 this case we would have:
2763 iD.1562 = 0;
2764 .omp_data_o.1.i = iD.1562;
2765 #omp parallel shared(iD.1562) -> outer parallel
2766 .omp_data_i.1 = &.omp_data_o.1
2767 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2769 .omp_data_o.2.i = iD.1562; -> **
2770 #omp parallel shared(iD.1562) -> inner parallel
2771 .omp_data_i.2 = &.omp_data_o.2
2772 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2775 ** This is a problem. The symbol iD.1562 cannot be referenced
2776 inside the body of the outer parallel region. But since we are
2777 emitting this copy operation while expanding the inner parallel
2778 directive, we need to access the CTX structure of the outer
2779 parallel directive to get the correct mapping:
2781 .omp_data_o.2.i = .omp_data_i.1->i
2783 Since there may be other workshare or parallel directives enclosing
2784 the parallel directive, it may be necessary to walk up the context
2785 parent chain. This is not a problem in general because nested
2786 parallelism happens only rarely. */
2788 static tree
2789 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2791 tree t;
2792 omp_context *up;
2794 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2795 t = maybe_lookup_decl (decl, up);
2797 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2799 return t ? t : decl;
2803 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2804 in outer contexts. */
2806 static tree
2807 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2809 tree t = NULL;
2810 omp_context *up;
2812 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2813 t = maybe_lookup_decl (decl, up);
2815 return t ? t : decl;
2819 /* Construct the initialization value for reduction CLAUSE. */
2821 tree
2822 omp_reduction_init (tree clause, tree type)
2824 location_t loc = OMP_CLAUSE_LOCATION (clause);
2825 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2827 case PLUS_EXPR:
2828 case MINUS_EXPR:
2829 case BIT_IOR_EXPR:
2830 case BIT_XOR_EXPR:
2831 case TRUTH_OR_EXPR:
2832 case TRUTH_ORIF_EXPR:
2833 case TRUTH_XOR_EXPR:
2834 case NE_EXPR:
2835 return build_zero_cst (type);
2837 case MULT_EXPR:
2838 case TRUTH_AND_EXPR:
2839 case TRUTH_ANDIF_EXPR:
2840 case EQ_EXPR:
2841 return fold_convert_loc (loc, type, integer_one_node);
2843 case BIT_AND_EXPR:
2844 return fold_convert_loc (loc, type, integer_minus_one_node);
2846 case MAX_EXPR:
2847 if (SCALAR_FLOAT_TYPE_P (type))
2849 REAL_VALUE_TYPE max, min;
2850 if (HONOR_INFINITIES (TYPE_MODE (type)))
2852 real_inf (&max);
2853 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2855 else
2856 real_maxval (&min, 1, TYPE_MODE (type));
2857 return build_real (type, min);
2859 else
2861 gcc_assert (INTEGRAL_TYPE_P (type));
2862 return TYPE_MIN_VALUE (type);
2865 case MIN_EXPR:
2866 if (SCALAR_FLOAT_TYPE_P (type))
2868 REAL_VALUE_TYPE max;
2869 if (HONOR_INFINITIES (TYPE_MODE (type)))
2870 real_inf (&max);
2871 else
2872 real_maxval (&max, 0, TYPE_MODE (type));
2873 return build_real (type, max);
2875 else
2877 gcc_assert (INTEGRAL_TYPE_P (type));
2878 return TYPE_MAX_VALUE (type);
2881 default:
2882 gcc_unreachable ();
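/* For example: reduction(+:x), (|:x), (^:x) and (||:x) start the
   private copy at 0; (*:x) and (&&:x) at 1; (&:x) at ~0; and
   reduction(max:x) at the minimum representable value of the type
   (-inf for floats when infinities are honored), so the first
   combined element always wins. */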
2886 /* Return alignment to be assumed for var in CLAUSE, which should be
2887 OMP_CLAUSE_ALIGNED. */
2889 static tree
2890 omp_clause_aligned_alignment (tree clause)
2892 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
2893 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
2895 /* Otherwise return the implementation-defined alignment. */
2896 unsigned int al = 1;
2897 enum machine_mode mode, vmode;
2898 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2899 if (vs)
2900 vs = 1 << floor_log2 (vs);
2901 static enum mode_class classes[]
2902 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
2903 for (int i = 0; i < 4; i += 2)
2904 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
2905 mode != VOIDmode;
2906 mode = GET_MODE_WIDER_MODE (mode))
2908 vmode = targetm.vectorize.preferred_simd_mode (mode);
2909 if (GET_MODE_CLASS (vmode) != classes[i + 1])
2910 continue;
2911 while (vs
2912 && GET_MODE_SIZE (vmode) < vs
2913 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
2914 vmode = GET_MODE_2XWIDER_MODE (vmode);
2916 tree type = lang_hooks.types.type_for_mode (mode, 1);
2917 if (type == NULL_TREE || TYPE_MODE (type) != mode)
2918 continue;
2919 type = build_vector_type (type, GET_MODE_SIZE (vmode)
2920 / GET_MODE_SIZE (mode));
2921 if (TYPE_MODE (type) != vmode)
2922 continue;
2923 if (TYPE_ALIGN_UNIT (type) > al)
2924 al = TYPE_ALIGN_UNIT (type);
2926 return build_int_cst (integer_type_node, al);
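/* So for "#pragma omp simd aligned (p)" with no explicit alignment,
   the value is the unit alignment of the widest preferred vector
   type; e.g. 32 on a target whose preferred SIMD modes are 256 bits
   wide (the concrete number is entirely target-dependent). */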
2929 /* Return maximum possible vectorization factor for the target. */
2931 static int
2932 omp_max_vf (void)
2934 if (!optimize
2935 || optimize_debug
2936 || !flag_tree_loop_optimize
2937 || (!flag_tree_loop_vectorize
2938 && (global_options_set.x_flag_tree_loop_vectorize
2939 || global_options_set.x_flag_tree_vectorize)))
2940 return 1;
2942 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2943 if (vs)
2945 vs = 1 << floor_log2 (vs);
2946 return vs;
2948 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2949 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2950 return GET_MODE_NUNITS (vqimode);
2951 return 1;
2954 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2955 privatization. */
2957 static bool
2958 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2959 tree &idx, tree &lane, tree &ivar, tree &lvar)
2961 if (max_vf == 0)
2963 max_vf = omp_max_vf ();
2964 if (max_vf > 1)
2966 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2967 OMP_CLAUSE_SAFELEN);
2968 if (c
2969 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1)
2970 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
2972 if (max_vf > 1)
2974 idx = create_tmp_var (unsigned_type_node, NULL);
2975 lane = create_tmp_var (unsigned_type_node, NULL);
2978 if (max_vf == 1)
2979 return false;
2981 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
2982 tree avar = create_tmp_var_raw (atype, NULL);
2983 if (TREE_ADDRESSABLE (new_var))
2984 TREE_ADDRESSABLE (avar) = 1;
2985 DECL_ATTRIBUTES (avar)
2986 = tree_cons (get_identifier ("omp simd array"), NULL,
2987 DECL_ATTRIBUTES (avar));
2988 gimple_add_tmp_var (avar);
2989 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
2990 NULL_TREE, NULL_TREE);
2991 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
2992 NULL_TREE, NULL_TREE);
2993 if (DECL_P (new_var))
2995 SET_DECL_VALUE_EXPR (new_var, lvar);
2996 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2998 return true;
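/* Sketch of the transformation: a privatized scalar D of type T gets
   a backing "omp simd array"

     T D_arr[max_vf];

   where IVAR is D_arr[idx] (the per-lane element used in the loop
   body) and LVAR is D_arr[lane] (installed as D's DECL_VALUE_EXPR).
   D_arr is an illustrative name; the real temporary is anonymous. */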
3001 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3002 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3003 private variables. Initialization statements go in ILIST, while calls
3004 to destructors go in DLIST. */
3006 static void
3007 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3008 omp_context *ctx, struct omp_for_data *fd)
3010 tree c, dtor, copyin_seq, x, ptr;
3011 bool copyin_by_ref = false;
3012 bool lastprivate_firstprivate = false;
3013 bool reduction_omp_orig_ref = false;
3014 int pass;
3015 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3016 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD);
3017 int max_vf = 0;
3018 tree lane = NULL_TREE, idx = NULL_TREE;
3019 tree ivar = NULL_TREE, lvar = NULL_TREE;
3020 gimple_seq llist[2] = { NULL, NULL };
3022 copyin_seq = NULL;
3024 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3025 with data sharing clauses referencing variable sized vars. That
3026 is unnecessarily hard to support and very unlikely to result in
3027 vectorized code anyway. */
3028 if (is_simd)
3029 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3030 switch (OMP_CLAUSE_CODE (c))
3032 case OMP_CLAUSE_REDUCTION:
3033 case OMP_CLAUSE_PRIVATE:
3034 case OMP_CLAUSE_FIRSTPRIVATE:
3035 case OMP_CLAUSE_LASTPRIVATE:
3036 case OMP_CLAUSE_LINEAR:
3037 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3038 max_vf = 1;
3039 break;
3040 default:
3041 continue;
3044 /* Do all the fixed sized types in the first pass, and the variable sized
3045 types in the second pass. This makes sure that the scalar arguments to
3046 the variable sized types are processed before we use them in the
3047 variable sized operations. */
3048 for (pass = 0; pass < 2; ++pass)
3050 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3052 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3053 tree var, new_var;
3054 bool by_ref;
3055 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3057 switch (c_kind)
3059 case OMP_CLAUSE_PRIVATE:
3060 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3061 continue;
3062 break;
3063 case OMP_CLAUSE_SHARED:
3064 /* Ignore shared clauses in the teams construct. */
3065 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3066 continue;
3067 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3069 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3070 continue;
3072 case OMP_CLAUSE_FIRSTPRIVATE:
3073 case OMP_CLAUSE_COPYIN:
3074 case OMP_CLAUSE_LINEAR:
3075 break;
3076 case OMP_CLAUSE_REDUCTION:
3077 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3078 reduction_omp_orig_ref = true;
3079 break;
3080 case OMP_CLAUSE__LOOPTEMP_:
3081 /* Handle _looptemp_ clauses only on parallel. */
3082 if (fd)
3083 continue;
3084 break;
3085 case OMP_CLAUSE_LASTPRIVATE:
3086 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3088 lastprivate_firstprivate = true;
3089 if (pass != 0)
3090 continue;
3092 break;
3093 case OMP_CLAUSE_ALIGNED:
3094 if (pass == 0)
3095 continue;
3096 var = OMP_CLAUSE_DECL (c);
3097 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3098 && !is_global_var (var))
3100 new_var = maybe_lookup_decl (var, ctx);
3101 if (new_var == NULL_TREE)
3102 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3103 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3104 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3105 omp_clause_aligned_alignment (c));
3106 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3107 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3108 gimplify_and_add (x, ilist);
3110 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3111 && is_global_var (var))
3113 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3114 new_var = lookup_decl (var, ctx);
3115 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3116 t = build_fold_addr_expr_loc (clause_loc, t);
3117 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3118 t = build_call_expr_loc (clause_loc, t2, 2, t,
3119 omp_clause_aligned_alignment (c));
3120 t = fold_convert_loc (clause_loc, ptype, t);
3121 x = create_tmp_var (ptype, NULL);
3122 t = build2 (MODIFY_EXPR, ptype, x, t);
3123 gimplify_and_add (t, ilist);
3124 t = build_simple_mem_ref_loc (clause_loc, x);
3125 SET_DECL_VALUE_EXPR (new_var, t);
3126 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3128 continue;
3129 default:
3130 continue;
3133 new_var = var = OMP_CLAUSE_DECL (c);
3134 if (c_kind != OMP_CLAUSE_COPYIN)
3135 new_var = lookup_decl (var, ctx);
3137 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3139 if (pass != 0)
3140 continue;
3142 else if (is_variable_sized (var))
3144 /* For variable sized types, we need to allocate the
3145 actual storage here. Call alloca and store the
3146 result in the pointer decl that we created elsewhere. */
3147 if (pass == 0)
3148 continue;
3150 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3152 gimple stmt;
3153 tree tmp, atmp;
3155 ptr = DECL_VALUE_EXPR (new_var);
3156 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3157 ptr = TREE_OPERAND (ptr, 0);
3158 gcc_assert (DECL_P (ptr));
3159 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3161 /* void *tmp = __builtin_alloca */
3162 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3163 stmt = gimple_build_call (atmp, 1, x);
3164 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3165 gimple_add_tmp_var (tmp);
3166 gimple_call_set_lhs (stmt, tmp);
3168 gimple_seq_add_stmt (ilist, stmt);
3170 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3171 gimplify_assign (ptr, x, ilist);
3174 else if (is_reference (var))
3176 /* For references that are being privatized for Fortran,
3177 allocate new backing storage for the new pointer
3178 variable. This allows us to avoid changing all the
3179 code that expects a pointer to something that expects
3180 a direct variable. */
3181 if (pass == 0)
3182 continue;
3184 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3185 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3187 x = build_receiver_ref (var, false, ctx);
3188 x = build_fold_addr_expr_loc (clause_loc, x);
3190 else if (TREE_CONSTANT (x))
3192 /* For a reduction with a placeholder in a SIMD loop,
3193 defer adding the initialization of the reference,
3194 because if we decide to use a SIMD array for it,
3195 the initialization could cause an expansion ICE. */
3196 if (c_kind == OMP_CLAUSE_REDUCTION
3197 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
3198 && is_simd)
3199 x = NULL_TREE;
3200 else
3202 const char *name = NULL;
3203 if (DECL_NAME (var))
3204 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3206 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3207 name);
3208 gimple_add_tmp_var (x);
3209 TREE_ADDRESSABLE (x) = 1;
3210 x = build_fold_addr_expr_loc (clause_loc, x);
3213 else
3215 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3216 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3219 if (x)
3221 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3222 gimplify_assign (new_var, x, ilist);
3225 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3227 else if (c_kind == OMP_CLAUSE_REDUCTION
3228 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3230 if (pass == 0)
3231 continue;
3233 else if (pass != 0)
3234 continue;
3236 switch (OMP_CLAUSE_CODE (c))
3238 case OMP_CLAUSE_SHARED:
3239 /* Ignore shared clauses in the teams construct. */
3240 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3241 continue;
3242 /* Shared global vars are just accessed directly. */
3243 if (is_global_var (new_var))
3244 break;
3245 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3246 needs to be delayed until after fixup_child_record_type so
3247 that we get the correct type during the dereference. */
3248 by_ref = use_pointer_for_field (var, ctx);
3249 x = build_receiver_ref (var, by_ref, ctx);
3250 SET_DECL_VALUE_EXPR (new_var, x);
3251 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3253 /* ??? If VAR is not passed by reference, and the variable
3254 hasn't been initialized yet, then we'll get a warning for
3255 the store into the omp_data_s structure. Ideally, we'd be
3256 able to notice this and not store anything at all, but
3257 we're generating code too early. Suppress the warning. */
3258 if (!by_ref)
3259 TREE_NO_WARNING (var) = 1;
3260 break;
3262 case OMP_CLAUSE_LASTPRIVATE:
3263 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3264 break;
3265 /* FALLTHRU */
3267 case OMP_CLAUSE_PRIVATE:
3268 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3269 x = build_outer_var_ref (var, ctx);
3270 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3272 if (is_task_ctx (ctx))
3273 x = build_receiver_ref (var, false, ctx);
3274 else
3275 x = build_outer_var_ref (var, ctx);
3277 else
3278 x = NULL;
3279 do_private:
3280 tree nx;
3281 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3282 if (is_simd)
3284 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3285 if ((TREE_ADDRESSABLE (new_var) || nx || y
3286 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3287 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3288 idx, lane, ivar, lvar))
3290 if (nx)
3291 x = lang_hooks.decls.omp_clause_default_ctor
3292 (c, unshare_expr (ivar), x);
3293 if (nx && x)
3294 gimplify_and_add (x, &llist[0]);
3295 if (y)
3297 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3298 if (y)
3300 gimple_seq tseq = NULL;
3302 dtor = y;
3303 gimplify_stmt (&dtor, &tseq);
3304 gimple_seq_add_seq (&llist[1], tseq);
3307 break;
3310 if (nx)
3311 gimplify_and_add (nx, ilist);
3312 /* FALLTHRU */
3314 do_dtor:
3315 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3316 if (x)
3318 gimple_seq tseq = NULL;
3320 dtor = x;
3321 gimplify_stmt (&dtor, &tseq);
3322 gimple_seq_add_seq (dlist, tseq);
3324 break;
3326 case OMP_CLAUSE_LINEAR:
3327 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3328 goto do_firstprivate;
3329 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3330 x = NULL;
3331 else
3332 x = build_outer_var_ref (var, ctx);
3333 goto do_private;
3335 case OMP_CLAUSE_FIRSTPRIVATE:
3336 if (is_task_ctx (ctx))
3338 if (is_reference (var) || is_variable_sized (var))
3339 goto do_dtor;
3340 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3341 ctx))
3342 || use_pointer_for_field (var, NULL))
3344 x = build_receiver_ref (var, false, ctx);
3345 SET_DECL_VALUE_EXPR (new_var, x);
3346 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3347 goto do_dtor;
3350 do_firstprivate:
3351 x = build_outer_var_ref (var, ctx);
3352 if (is_simd)
3354 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3355 && gimple_omp_for_combined_into_p (ctx->stmt))
3357 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
3358 ? sizetype : TREE_TYPE (x);
3359 tree t = fold_convert (stept,
3360 OMP_CLAUSE_LINEAR_STEP (c));
3361 tree c = find_omp_clause (clauses,
3362 OMP_CLAUSE__LOOPTEMP_);
3363 gcc_assert (c);
3364 tree l = OMP_CLAUSE_DECL (c);
3365 if (fd->collapse == 1)
3367 tree n1 = fd->loop.n1;
3368 tree step = fd->loop.step;
3369 tree itype = TREE_TYPE (l);
3370 if (POINTER_TYPE_P (itype))
3371 itype = signed_type_for (itype);
3372 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3373 if (TYPE_UNSIGNED (itype)
3374 && fd->loop.cond_code == GT_EXPR)
3375 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3376 fold_build1 (NEGATE_EXPR,
3377 itype, l),
3378 fold_build1 (NEGATE_EXPR,
3379 itype, step));
3380 else
3381 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3383 t = fold_build2 (MULT_EXPR, stept,
3384 fold_convert (stept, l), t);
3385 if (POINTER_TYPE_P (TREE_TYPE (x)))
3386 x = fold_build2 (POINTER_PLUS_EXPR,
3387 TREE_TYPE (x), x, t);
3388 else
3389 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3392 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3393 || TREE_ADDRESSABLE (new_var))
3394 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3395 idx, lane, ivar, lvar))
3397 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3399 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3400 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3401 gimplify_and_add (x, ilist);
3402 gimple_stmt_iterator gsi
3403 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3404 gimple g
3405 = gimple_build_assign (unshare_expr (lvar), iv);
3406 gsi_insert_before_without_update (&gsi, g,
3407 GSI_SAME_STMT);
3408 tree stept = POINTER_TYPE_P (TREE_TYPE (iv))
3409 ? sizetype : TREE_TYPE (iv);
3410 tree t = fold_convert (stept,
3411 OMP_CLAUSE_LINEAR_STEP (c));
3412 enum tree_code code = PLUS_EXPR;
3413 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3414 code = POINTER_PLUS_EXPR;
3415 g = gimple_build_assign_with_ops (code, iv, iv, t);
3416 gsi_insert_before_without_update (&gsi, g,
3417 GSI_SAME_STMT);
3418 break;
3420 x = lang_hooks.decls.omp_clause_copy_ctor
3421 (c, unshare_expr (ivar), x);
3422 gimplify_and_add (x, &llist[0]);
3423 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3424 if (x)
3426 gimple_seq tseq = NULL;
3428 dtor = x;
3429 gimplify_stmt (&dtor, &tseq);
3430 gimple_seq_add_seq (&llist[1], tseq);
3432 break;
3435 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3436 gimplify_and_add (x, ilist);
3437 goto do_dtor;
3439 case OMP_CLAUSE__LOOPTEMP_:
3440 gcc_assert (is_parallel_ctx (ctx));
3441 x = build_outer_var_ref (var, ctx);
3442 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3443 gimplify_and_add (x, ilist);
3444 break;
3446 case OMP_CLAUSE_COPYIN:
3447 by_ref = use_pointer_for_field (var, NULL);
3448 x = build_receiver_ref (var, by_ref, ctx);
3449 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3450 append_to_statement_list (x, &copyin_seq);
3451 copyin_by_ref |= by_ref;
3452 break;
3454 case OMP_CLAUSE_REDUCTION:
3455 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3457 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3458 gimple tseq;
3459 x = build_outer_var_ref (var, ctx);
3461 if (is_reference (var)
3462 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3463 TREE_TYPE (x)))
3464 x = build_fold_addr_expr_loc (clause_loc, x);
3465 SET_DECL_VALUE_EXPR (placeholder, x);
3466 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3467 tree new_vard = new_var;
3468 if (is_reference (var))
3470 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3471 new_vard = TREE_OPERAND (new_var, 0);
3472 gcc_assert (DECL_P (new_vard));
3474 if (is_simd
3475 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3476 idx, lane, ivar, lvar))
3478 if (new_vard == new_var)
3480 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3481 SET_DECL_VALUE_EXPR (new_var, ivar);
3483 else
3485 SET_DECL_VALUE_EXPR (new_vard,
3486 build_fold_addr_expr (ivar));
3487 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3489 x = lang_hooks.decls.omp_clause_default_ctor
3490 (c, unshare_expr (ivar),
3491 build_outer_var_ref (var, ctx));
3492 if (x)
3493 gimplify_and_add (x, &llist[0]);
3494 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3496 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3497 lower_omp (&tseq, ctx);
3498 gimple_seq_add_seq (&llist[0], tseq);
3500 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3501 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3502 lower_omp (&tseq, ctx);
3503 gimple_seq_add_seq (&llist[1], tseq);
3504 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3505 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3506 if (new_vard == new_var)
3507 SET_DECL_VALUE_EXPR (new_var, lvar);
3508 else
3509 SET_DECL_VALUE_EXPR (new_vard,
3510 build_fold_addr_expr (lvar));
3511 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3512 if (x)
3514 tseq = NULL;
3515 dtor = x;
3516 gimplify_stmt (&dtor, &tseq);
3517 gimple_seq_add_seq (&llist[1], tseq);
3519 break;
3521 /* If this is a reference to constant size reduction var
3522 with placeholder, we haven't emitted the initializer
3523 for it because it is undesirable if SIMD arrays are used.
3524 But if they aren't used, we need to emit the deferred
3525 initialization now. */
3526 else if (is_reference (var) && is_simd)
3528 tree z
3529 = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3530 if (TREE_CONSTANT (z))
3532 const char *name = NULL;
3533 if (DECL_NAME (var))
3534 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3536 z = create_tmp_var_raw
3537 (TREE_TYPE (TREE_TYPE (new_vard)), name);
3538 gimple_add_tmp_var (z);
3539 TREE_ADDRESSABLE (z) = 1;
3540 z = build_fold_addr_expr_loc (clause_loc, z);
3541 gimplify_assign (new_vard, z, ilist);
3544 x = lang_hooks.decls.omp_clause_default_ctor
3545 (c, new_var, unshare_expr (x));
3546 if (x)
3547 gimplify_and_add (x, ilist);
3548 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3550 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3551 lower_omp (&tseq, ctx);
3552 gimple_seq_add_seq (ilist, tseq);
3554 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3555 if (is_simd)
3557 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3558 lower_omp (&tseq, ctx);
3559 gimple_seq_add_seq (dlist, tseq);
3560 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3562 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3563 goto do_dtor;
3565 else
3567 x = omp_reduction_init (c, TREE_TYPE (new_var));
3568 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3569 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3571 /* reduction(-:var) sums up the partial results, so it
3572 acts identically to reduction(+:var). */
3573 if (code == MINUS_EXPR)
3574 code = PLUS_EXPR;
3576 if (is_simd
3577 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3578 idx, lane, ivar, lvar))
3580 tree ref = build_outer_var_ref (var, ctx);
3582 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3584 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3585 ref = build_outer_var_ref (var, ctx);
3586 gimplify_assign (ref, x, &llist[1]);
3588 else
3590 gimplify_assign (new_var, x, ilist);
3591 if (is_simd)
3593 tree ref = build_outer_var_ref (var, ctx);
3595 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3596 ref = build_outer_var_ref (var, ctx);
3597 gimplify_assign (ref, x, dlist);
3601 break;
3603 default:
3604 gcc_unreachable ();
3609 if (lane)
3611 tree uid = create_tmp_var (ptr_type_node, "simduid");
3612 /* We don't want uninit warnings on simduid; it is always uninitialized,
3613 but we use it only for its DECL_UID, never for its value. */
3614 TREE_NO_WARNING (uid) = 1;
3615 gimple g
3616 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3617 gimple_call_set_lhs (g, lane);
3618 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3619 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3620 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3621 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3622 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3623 gimple_omp_for_set_clauses (ctx->stmt, c);
3624 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3625 build_int_cst (unsigned_type_node, 0),
3626 NULL_TREE);
3627 gimple_seq_add_stmt (ilist, g);
3628 for (int i = 0; i < 2; i++)
3629 if (llist[i])
3631 tree vf = create_tmp_var (unsigned_type_node, NULL);
3632 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3633 gimple_call_set_lhs (g, vf);
3634 gimple_seq *seq = i == 0 ? ilist : dlist;
3635 gimple_seq_add_stmt (seq, g);
3636 tree t = build_int_cst (unsigned_type_node, 0);
3637 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3638 gimple_seq_add_stmt (seq, g);
3639 tree body = create_artificial_label (UNKNOWN_LOCATION);
3640 tree header = create_artificial_label (UNKNOWN_LOCATION);
3641 tree end = create_artificial_label (UNKNOWN_LOCATION);
3642 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3643 gimple_seq_add_stmt (seq, gimple_build_label (body));
3644 gimple_seq_add_seq (seq, llist[i]);
3645 t = build_int_cst (unsigned_type_node, 1);
3646 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3647 gimple_seq_add_stmt (seq, g);
3648 gimple_seq_add_stmt (seq, gimple_build_label (header));
3649 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3650 gimple_seq_add_stmt (seq, g);
3651 gimple_seq_add_stmt (seq, gimple_build_label (end));
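/* The statements just built amount to

     idx = 0;
     goto header;
   body:
     ... llist[i] statements ...
     idx = idx + 1;
   header:
     if (idx < vf) goto body; else goto end;
   end:

   i.e. the per-lane constructor (i == 0) or destructor (i == 1) loop
   over the SIMD array elements. */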
3655 /* The copyin sequence is not to be executed by the main thread, since
3656 that would result in self-copies. For scalars the self-copy may be
3657 invisible, but for C++ operator= it certainly is not. */
3658 if (copyin_seq)
3660 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3661 0);
3662 x = build2 (NE_EXPR, boolean_type_node, x,
3663 build_int_cst (TREE_TYPE (x), 0));
3664 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3665 gimplify_and_add (x, ilist);
3668 /* If any copyin variable is passed by reference, we must ensure the
3669 master thread doesn't modify it before it is copied over in all
3670 threads. Similarly for variables in both firstprivate and
3671 lastprivate clauses we need to ensure the lastprivate copying
3672 happens after firstprivate copying in all threads. And similarly
3673 for UDRs if initializer expression refers to omp_orig. */
3674 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3676 /* Don't add any barrier for #pragma omp simd or
3677 #pragma omp distribute. */
3678 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3679 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3680 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3683 /* If max_vf is non-zero, then we can use only a vectorization factor
3684 up to the max_vf we chose. So stick it into the safelen clause. */
3685 if (max_vf)
3687 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3688 OMP_CLAUSE_SAFELEN);
3689 if (c == NULL_TREE
3690 || compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3691 max_vf) == 1)
3693 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3694 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3695 max_vf);
3696 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3697 gimple_omp_for_set_clauses (ctx->stmt, c);
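/* E.g. if the user wrote safelen(16) but max_vf was limited to 8 (or
   forced to 1 by variable sized privatized vars), a safelen(8) (resp.
   safelen(1)) clause is prepended here so the vectorizer never assumes
   more concurrent lanes than the privatized arrays provide. */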
3703 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3704 both parallel and workshare constructs. PREDICATE may be NULL if it's
3705 always true. */
3707 static void
3708 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3709 omp_context *ctx)
3711 tree x, c, label = NULL, orig_clauses = clauses;
3712 bool par_clauses = false;
3713 tree simduid = NULL, lastlane = NULL;
3715 /* Early exit if there are no lastprivate or linear clauses. */
3716 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3717 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3718 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3719 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3720 break;
3721 if (clauses == NULL)
3723 /* If this was a workshare construct, see if it had been combined
3724 with its parallel. In that case, look for the clauses on the
3725 parallel statement itself. */
3726 if (is_parallel_ctx (ctx))
3727 return;
3729 ctx = ctx->outer;
3730 if (ctx == NULL || !is_parallel_ctx (ctx))
3731 return;
3733 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3734 OMP_CLAUSE_LASTPRIVATE);
3735 if (clauses == NULL)
3736 return;
3737 par_clauses = true;
3740 if (predicate)
3742 gimple stmt;
3743 tree label_true, arm1, arm2;
3745 label = create_artificial_label (UNKNOWN_LOCATION);
3746 label_true = create_artificial_label (UNKNOWN_LOCATION);
3747 arm1 = TREE_OPERAND (predicate, 0);
3748 arm2 = TREE_OPERAND (predicate, 1);
3749 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3750 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3751 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3752 label_true, label);
3753 gimple_seq_add_stmt (stmt_list, stmt);
3754 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3757 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3758 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
3760 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3761 if (simduid)
3762 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3765 for (c = clauses; c ;)
3767 tree var, new_var;
3768 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3770 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3771 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3772 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3774 var = OMP_CLAUSE_DECL (c);
3775 new_var = lookup_decl (var, ctx);
3777 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3779 tree val = DECL_VALUE_EXPR (new_var);
3780 if (TREE_CODE (val) == ARRAY_REF
3781 && VAR_P (TREE_OPERAND (val, 0))
3782 && lookup_attribute ("omp simd array",
3783 DECL_ATTRIBUTES (TREE_OPERAND (val,
3784 0))))
3786 if (lastlane == NULL)
3788 lastlane = create_tmp_var (unsigned_type_node, NULL);
3789 gimple g
3790 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3791 2, simduid,
3792 TREE_OPERAND (val, 1));
3793 gimple_call_set_lhs (g, lastlane);
3794 gimple_seq_add_stmt (stmt_list, g);
3796 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3797 TREE_OPERAND (val, 0), lastlane,
3798 NULL_TREE, NULL_TREE);
3802 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3803 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
3805 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
3806 gimple_seq_add_seq (stmt_list,
3807 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
3808 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
3810 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3811 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
3813 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
3814 gimple_seq_add_seq (stmt_list,
3815 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
3816 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
3819 x = build_outer_var_ref (var, ctx);
3820 if (is_reference (var))
3821 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3822 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
3823 gimplify_and_add (x, stmt_list);
3825 c = OMP_CLAUSE_CHAIN (c);
3826 if (c == NULL && !par_clauses)
3828 /* If this was a workshare construct, see if it had been combined
3829 with its parallel. In that case, continue looking for the
3830 clauses also on the parallel statement itself. */
3831 if (is_parallel_ctx (ctx))
3832 break;
3834 ctx = ctx->outer;
3835 if (ctx == NULL || !is_parallel_ctx (ctx))
3836 break;
3838 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3839 OMP_CLAUSE_LASTPRIVATE);
3840 par_clauses = true;
3844 if (label)
3845 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
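/* Shape of the emitted sequence, roughly, for
   "#pragma omp for lastprivate(x)":

     if (PREDICATE) goto label_true; else goto label;
   label_true:
     x_orig = x_priv;
   label:

   where PREDICATE identifies the thread that ran the sequentially
   last iteration; in the SIMD case GOMP_SIMD_LAST_LANE first selects
   the lane of the "omp simd array" holding the final value. */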
3849 /* Generate code to implement the REDUCTION clauses. */
3851 static void
3852 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
3854 gimple_seq sub_seq = NULL;
3855 gimple stmt;
3856 tree x, c;
3857 int count = 0;
3859 /* SIMD reductions are handled in lower_rec_input_clauses. */
3860 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3861 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
3862 return;
3864 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3865 update in that case, otherwise use a lock. */
3866 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
3867 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
3869 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3871 /* Never use OMP_ATOMIC for array reductions or UDRs. */
3872 count = -1;
3873 break;
3875 count++;
3878 if (count == 0)
3879 return;
3881 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3883 tree var, ref, new_var;
3884 enum tree_code code;
3885 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3887 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
3888 continue;
3890 var = OMP_CLAUSE_DECL (c);
3891 new_var = lookup_decl (var, ctx);
3892 if (is_reference (var))
3893 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3894 ref = build_outer_var_ref (var, ctx);
3895 code = OMP_CLAUSE_REDUCTION_CODE (c);
3897 /* reduction(-:var) sums up the partial results, so it acts
3898 identically to reduction(+:var). */
3899 if (code == MINUS_EXPR)
3900 code = PLUS_EXPR;
3902 if (count == 1)
3904 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
3906 addr = save_expr (addr);
3907 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
3908 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
3909 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
3910 gimplify_and_add (x, stmt_seqp);
3911 return;
3914 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3916 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3918 if (is_reference (var)
3919 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3920 TREE_TYPE (ref)))
3921 ref = build_fold_addr_expr_loc (clause_loc, ref);
3922 SET_DECL_VALUE_EXPR (placeholder, ref);
3923 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3924 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
3925 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3926 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3927 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3929 else
3931 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3932 ref = build_outer_var_ref (var, ctx);
3933 gimplify_assign (ref, x, &sub_seq);
3937 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
3938 0);
3939 gimple_seq_add_stmt (stmt_seqp, stmt);
3941 gimple_seq_add_seq (stmt_seqp, sub_seq);
3943 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
3944 0);
3945 gimple_seq_add_stmt (stmt_seqp, stmt);
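/* Roughly, for "reduction(+:s)" the merge code built above takes one of
   two shapes (a source-level sketch; s_priv and t_priv stand for the
   thread-private copies):

     s = s + s_priv;			// count == 1: OMP_ATOMIC update

     GOMP_atomic_start ();		// otherwise: one lock around
     s = s + s_priv;			// all the merges in SUB_SEQ
     t = t * t_priv;
     GOMP_atomic_end ();  */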
3949 /* Generate code to implement the COPYPRIVATE clauses. */
3951 static void
3952 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
3953 omp_context *ctx)
3955 tree c;
3957 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3959 tree var, new_var, ref, x;
3960 bool by_ref;
3961 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3963 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
3964 continue;
3966 var = OMP_CLAUSE_DECL (c);
3967 by_ref = use_pointer_for_field (var, NULL);
3969 ref = build_sender_ref (var, ctx);
3970 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
3971 if (by_ref)
3973 x = build_fold_addr_expr_loc (clause_loc, new_var);
3974 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
3976 gimplify_assign (ref, x, slist);
3978 ref = build_receiver_ref (var, false, ctx);
3979 if (by_ref)
3981 ref = fold_convert_loc (clause_loc,
3982 build_pointer_type (TREE_TYPE (new_var)),
3983 ref);
3984 ref = build_fold_indirect_ref_loc (clause_loc, ref);
3986 if (is_reference (var))
3988 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
3989 ref = build_simple_mem_ref_loc (clause_loc, ref);
3990 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3992 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
3993 gimplify_and_add (x, rlist);
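/* In user terms, a sketch of what SLIST/RLIST implement: for

     #pragma omp single copyprivate(x)
     x = compute ();

   the one thread that executes the single region stores x (or, when
   BY_REF, its address) into the communication record via SLIST, and
   every other thread copies the value back out via RLIST, so all
   threads leave the construct with the same x.  */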
3998 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
3999 and REDUCTION from the sender (aka parent) side. */
4001 static void
4002 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4003 omp_context *ctx)
4005 tree c;
4007 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4009 tree val, ref, x, var;
4010 bool by_ref, do_in = false, do_out = false;
4011 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4013 switch (OMP_CLAUSE_CODE (c))
4015 case OMP_CLAUSE_PRIVATE:
4016 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4017 break;
4018 continue;
4019 case OMP_CLAUSE_FIRSTPRIVATE:
4020 case OMP_CLAUSE_COPYIN:
4021 case OMP_CLAUSE_LASTPRIVATE:
4022 case OMP_CLAUSE_REDUCTION:
4023 case OMP_CLAUSE__LOOPTEMP_:
4024 break;
4025 default:
4026 continue;
4029 val = OMP_CLAUSE_DECL (c);
4030 var = lookup_decl_in_outer_ctx (val, ctx);
4032 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4033 && is_global_var (var))
4034 continue;
4035 if (is_variable_sized (val))
4036 continue;
4037 by_ref = use_pointer_for_field (val, NULL);
4039 switch (OMP_CLAUSE_CODE (c))
4041 case OMP_CLAUSE_PRIVATE:
4042 case OMP_CLAUSE_FIRSTPRIVATE:
4043 case OMP_CLAUSE_COPYIN:
4044 case OMP_CLAUSE__LOOPTEMP_:
4045 do_in = true;
4046 break;
4048 case OMP_CLAUSE_LASTPRIVATE:
4049 if (by_ref || is_reference (val))
4051 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4052 continue;
4053 do_in = true;
4055 else
4057 do_out = true;
4058 if (lang_hooks.decls.omp_private_outer_ref (val))
4059 do_in = true;
4061 break;
4063 case OMP_CLAUSE_REDUCTION:
4064 do_in = true;
4065 do_out = !(by_ref || is_reference (val));
4066 break;
4068 default:
4069 gcc_unreachable ();
4072 if (do_in)
4074 ref = build_sender_ref (val, ctx);
4075 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4076 gimplify_assign (ref, x, ilist);
4077 if (is_task_ctx (ctx))
4078 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4081 if (do_out)
4083 ref = build_sender_ref (val, ctx);
4084 gimplify_assign (var, ref, olist);
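/* A sketch of the marshalling built here, with an invented field name:

     .omp_data_o.x = x;		// DO_IN: parent sends the value
     ... child function runs, reading .omp_data_i->x ...
     x = .omp_data_o.x;		// DO_OUT: parent copies the result back

   E.g. firstprivate only sends, a by-value lastprivate only receives,
   and a by-value reduction does both.  */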
4089 /* Generate code to implement SHARED from the sender (aka parent)
4090 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4091 list things that got automatically shared. */
4093 static void
4094 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4096 tree var, ovar, nvar, f, x, record_type;
4098 if (ctx->record_type == NULL)
4099 return;
4101 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4102 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4104 ovar = DECL_ABSTRACT_ORIGIN (f);
4105 nvar = maybe_lookup_decl (ovar, ctx);
4106 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4107 continue;
4109 /* If CTX is a nested parallel directive, find the immediately
4110 enclosing parallel or workshare construct that contains a
4111 mapping for OVAR. */
4112 var = lookup_decl_in_outer_ctx (ovar, ctx);
4114 if (use_pointer_for_field (ovar, ctx))
4116 x = build_sender_ref (ovar, ctx);
4117 var = build_fold_addr_expr (var);
4118 gimplify_assign (x, var, ilist);
4120 else
4122 x = build_sender_ref (ovar, ctx);
4123 gimplify_assign (x, var, ilist);
4125 if (!TREE_READONLY (var)
4126 /* We don't need to receive a new reference to a result
4127 or parm decl. In fact we may not store to it as we will
4128 invalidate any pending RSO (return slot optimization) and generate wrong gimple
4129 during inlining. */
4130 && !((TREE_CODE (var) == RESULT_DECL
4131 || TREE_CODE (var) == PARM_DECL)
4132 && DECL_BY_REFERENCE (var)))
4134 x = build_sender_ref (ovar, ctx);
4135 gimplify_assign (var, x, olist);
4142 /* A convenience function to build an empty GIMPLE_COND with just the
4143 condition. */
4145 static gimple
4146 gimple_build_cond_empty (tree cond)
4148 enum tree_code pred_code;
4149 tree lhs, rhs;
4151 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4152 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4156 /* Build the function calls to GOMP_parallel_start etc to actually
4157 generate the parallel operation. REGION is the parallel region
4158 being expanded. BB is the block where the code is to be inserted.
4159 WS_ARGS will be set if this is a call to a combined parallel+workshare
4160 construct; it contains the list of additional arguments needed by
4161 the workshare construct. */
4163 static void
4164 expand_parallel_call (struct omp_region *region, basic_block bb,
4165 gimple entry_stmt, vec<tree, va_gc> *ws_args)
4167 tree t, t1, t2, val, cond, c, clauses, flags;
4168 gimple_stmt_iterator gsi;
4169 gimple stmt;
4170 enum built_in_function start_ix;
4171 int start_ix2;
4172 location_t clause_loc;
4173 vec<tree, va_gc> *args;
4175 clauses = gimple_omp_parallel_clauses (entry_stmt);
4177 /* Determine what flavor of GOMP_parallel we will be
4178 emitting. */
4179 start_ix = BUILT_IN_GOMP_PARALLEL;
4180 if (is_combined_parallel (region))
4182 switch (region->inner->type)
4184 case GIMPLE_OMP_FOR:
4185 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4186 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4187 + (region->inner->sched_kind
4188 == OMP_CLAUSE_SCHEDULE_RUNTIME
4189 ? 3 : region->inner->sched_kind));
4190 start_ix = (enum built_in_function)start_ix2;
4191 break;
4192 case GIMPLE_OMP_SECTIONS:
4193 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4194 break;
4195 default:
4196 gcc_unreachable ();
4200 /* By default, the value of NUM_THREADS is zero (selected at run time)
4201 and there is no conditional. */
4202 cond = NULL_TREE;
4203 val = build_int_cst (unsigned_type_node, 0);
4204 flags = build_int_cst (unsigned_type_node, 0);
4206 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4207 if (c)
4208 cond = OMP_CLAUSE_IF_EXPR (c);
4210 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4211 if (c)
4213 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4214 clause_loc = OMP_CLAUSE_LOCATION (c);
4216 else
4217 clause_loc = gimple_location (entry_stmt);
4219 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4220 if (c)
4221 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4223 /* Ensure 'val' is of the correct type. */
4224 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4226 /* If we found the clause 'if (cond)', build either
4227 (cond == 0), which yields 1 (i.e. run serially) exactly when COND is false, or (cond ? val : 1u). */
4228 if (cond)
4230 cond = gimple_boolify (cond);
4232 if (integer_zerop (val))
4233 val = fold_build2_loc (clause_loc,
4234 EQ_EXPR, unsigned_type_node, cond,
4235 build_int_cst (TREE_TYPE (cond), 0));
4236 else
4238 basic_block cond_bb, then_bb, else_bb;
4239 edge e, e_then, e_else;
4240 tree tmp_then, tmp_else, tmp_join, tmp_var;
4242 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4243 if (gimple_in_ssa_p (cfun))
4245 tmp_then = make_ssa_name (tmp_var, NULL);
4246 tmp_else = make_ssa_name (tmp_var, NULL);
4247 tmp_join = make_ssa_name (tmp_var, NULL);
4249 else
4251 tmp_then = tmp_var;
4252 tmp_else = tmp_var;
4253 tmp_join = tmp_var;
4256 e = split_block (bb, NULL);
4257 cond_bb = e->src;
4258 bb = e->dest;
4259 remove_edge (e);
4261 then_bb = create_empty_bb (cond_bb);
4262 else_bb = create_empty_bb (then_bb);
4263 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4264 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4266 stmt = gimple_build_cond_empty (cond);
4267 gsi = gsi_start_bb (cond_bb);
4268 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4270 gsi = gsi_start_bb (then_bb);
4271 stmt = gimple_build_assign (tmp_then, val);
4272 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4274 gsi = gsi_start_bb (else_bb);
4275 stmt = gimple_build_assign
4276 (tmp_else, build_int_cst (unsigned_type_node, 1));
4277 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4279 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4280 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4281 if (current_loops)
4283 add_bb_to_loop (then_bb, cond_bb->loop_father);
4284 add_bb_to_loop (else_bb, cond_bb->loop_father);
4286 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4287 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4289 if (gimple_in_ssa_p (cfun))
4291 gimple phi = create_phi_node (tmp_join, bb);
4292 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4293 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4296 val = tmp_join;
4299 gsi = gsi_start_bb (bb);
4300 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4301 false, GSI_CONTINUE_LINKING);
4304 gsi = gsi_last_bb (bb);
4305 t = gimple_omp_parallel_data_arg (entry_stmt);
4306 if (t == NULL)
4307 t1 = null_pointer_node;
4308 else
4309 t1 = build_fold_addr_expr (t);
4310 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4312 vec_alloc (args, 4 + vec_safe_length (ws_args));
4313 args->quick_push (t2);
4314 args->quick_push (t1);
4315 args->quick_push (val);
4316 if (ws_args)
4317 args->splice (*ws_args);
4318 args->quick_push (flags);
4320 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4321 builtin_decl_explicit (start_ix), args);
4323 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4324 false, GSI_CONTINUE_LINKING);
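/* The net effect here is a single runtime call; as a sketch, for a
   plain parallel region whose body was outlined into foo._omp_fn.0
   (name illustrative):

     GOMP_parallel (foo._omp_fn.0, &.omp_data_o, val, flags);

   where VAL is 0 unless a num_threads or if clause was present and
   FLAGS carries the proc_bind kind.  For a combined parallel loop or
   sections construct, the arguments from WS_ARGS are spliced in and
   one of the GOMP_parallel_loop_* / GOMP_parallel_sections entry
   points is called instead.  */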
4328 /* Build the function call to GOMP_task to actually
4329 generate the task operation. BB is the block where the code is to be inserted. */
4331 static void
4332 expand_task_call (basic_block bb, gimple entry_stmt)
4334 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4335 gimple_stmt_iterator gsi;
4336 location_t loc = gimple_location (entry_stmt);
4338 clauses = gimple_omp_task_clauses (entry_stmt);
4340 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4341 if (c)
4342 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4343 else
4344 cond = boolean_true_node;
4346 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4347 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4348 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4349 flags = build_int_cst (unsigned_type_node,
4350 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4352 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4353 if (c)
4355 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4356 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4357 build_int_cst (unsigned_type_node, 2),
4358 build_int_cst (unsigned_type_node, 0));
4359 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4361 if (depend)
4362 depend = OMP_CLAUSE_DECL (depend);
4363 else
4364 depend = build_int_cst (ptr_type_node, 0);
4366 gsi = gsi_last_bb (bb);
4367 t = gimple_omp_task_data_arg (entry_stmt);
4368 if (t == NULL)
4369 t2 = null_pointer_node;
4370 else
4371 t2 = build_fold_addr_expr_loc (loc, t);
4372 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4373 t = gimple_omp_task_copy_fn (entry_stmt);
4374 if (t == NULL)
4375 t3 = null_pointer_node;
4376 else
4377 t3 = build_fold_addr_expr_loc (loc, t);
4379 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4380 8, t1, t2, t3,
4381 gimple_omp_task_arg_size (entry_stmt),
4382 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4383 depend);
4385 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4386 false, GSI_CONTINUE_LINKING);
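/* The FLAGS word assembled above encodes the clauses as bits: 1 for
   untied, 2 for final (folded in at run time from the FINAL
   expression), 4 for mergeable and 8 for depend.  Sketchily, for
   "#pragma omp task untied final(f)" the emitted call is

     GOMP_task (fn, data, cpyfn, arg_size, arg_align,
		cond, 1 + (f ? 2 : 0), NULL);  */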
4390 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4391 catch handler and return it. This prevents programs from violating the
4392 structured block semantics with throws. */
4394 static gimple_seq
4395 maybe_catch_exception (gimple_seq body)
4397 gimple g;
4398 tree decl;
4400 if (!flag_exceptions)
4401 return body;
4403 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4404 decl = lang_hooks.eh_protect_cleanup_actions ();
4405 else
4406 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4408 g = gimple_build_eh_must_not_throw (decl);
4409 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4410 GIMPLE_TRY_CATCH);
4412 return gimple_seq_alloc_with_stmt (g);
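/* The wrapper corresponds to the source-level shape (a sketch)

     try { BODY }
     catch (...) { <eh_protect_cleanup_actions or __builtin_trap> (); }

   so no exception can propagate out of the structured block.  */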
4415 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
4417 static tree
4418 vec2chain (vec<tree, va_gc> *v)
4420 tree chain = NULL_TREE, t;
4421 unsigned ix;
4423 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4425 DECL_CHAIN (t) = chain;
4426 chain = t;
4429 return chain;
4433 /* Remove barriers in REGION->EXIT's block. Note that this is only
4434 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4435 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
4436 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
4437 removed. */
4439 static void
4440 remove_exit_barrier (struct omp_region *region)
4442 gimple_stmt_iterator gsi;
4443 basic_block exit_bb;
4444 edge_iterator ei;
4445 edge e;
4446 gimple stmt;
4447 int any_addressable_vars = -1;
4449 exit_bb = region->exit;
4451 /* If the parallel region doesn't return, we don't have REGION->EXIT
4452 block at all. */
4453 if (! exit_bb)
4454 return;
4456 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4457 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4458 statements that can appear in between are extremely limited -- no
4459 memory operations at all. Here, we allow nothing at all, so the
4460 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4461 gsi = gsi_last_bb (exit_bb);
4462 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4463 gsi_prev (&gsi);
4464 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4465 return;
4467 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4469 gsi = gsi_last_bb (e->src);
4470 if (gsi_end_p (gsi))
4471 continue;
4472 stmt = gsi_stmt (gsi);
4473 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4474 && !gimple_omp_return_nowait_p (stmt))
4476 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4477 in many cases. If there could be tasks queued, the barrier
4478 might be needed to let the tasks run before some local
4479 variable of the parallel that the task uses as shared
4480 runs out of scope. The task can be spawned either
4481 from within the current function (this would be easy to check)
4482 or from some function it calls and gets passed an address
4483 of such a variable. */
4484 if (any_addressable_vars < 0)
4486 gimple parallel_stmt = last_stmt (region->entry);
4487 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4488 tree local_decls, block, decl;
4489 unsigned ix;
4491 any_addressable_vars = 0;
4492 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4493 if (TREE_ADDRESSABLE (decl))
4495 any_addressable_vars = 1;
4496 break;
4498 for (block = gimple_block (stmt);
4499 !any_addressable_vars
4500 && block
4501 && TREE_CODE (block) == BLOCK;
4502 block = BLOCK_SUPERCONTEXT (block))
4504 for (local_decls = BLOCK_VARS (block);
4505 local_decls;
4506 local_decls = DECL_CHAIN (local_decls))
4507 if (TREE_ADDRESSABLE (local_decls))
4509 any_addressable_vars = 1;
4510 break;
4512 if (block == gimple_block (parallel_stmt))
4513 break;
4516 if (!any_addressable_vars)
4517 gimple_omp_return_set_nowait (stmt);
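/* An example of why the barrier must stay once addressable locals are
   involved (a sketch; use() is a placeholder):

     #pragma omp parallel
     {
       int v = 0;
       #pragma omp task shared(v)
       use (&v);		// may only run at the implicit barrier
     }

   Dropping the barrier could let v go out of scope before the task
   runs; when no addressable variable is in sight, no task can refer to
   the parallel's locals and the GIMPLE_OMP_RETURN can become nowait.  */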
4522 static void
4523 remove_exit_barriers (struct omp_region *region)
4525 if (region->type == GIMPLE_OMP_PARALLEL)
4526 remove_exit_barrier (region);
4528 if (region->inner)
4530 region = region->inner;
4531 remove_exit_barriers (region);
4532 while (region->next)
4534 region = region->next;
4535 remove_exit_barriers (region);
4540 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4541 calls. These can't be declared as const functions, but
4542 within one parallel body they are constant, so they can be
4543 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4544 which are declared const. Similarly for a task body, except
4545 that in an untied task omp_get_thread_num () can change at any task
4546 scheduling point. */
4548 static void
4549 optimize_omp_library_calls (gimple entry_stmt)
4551 basic_block bb;
4552 gimple_stmt_iterator gsi;
4553 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4554 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4555 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4556 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4557 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4558 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4559 OMP_CLAUSE_UNTIED) != NULL);
4561 FOR_EACH_BB_FN (bb, cfun)
4562 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4564 gimple call = gsi_stmt (gsi);
4565 tree decl;
4567 if (is_gimple_call (call)
4568 && (decl = gimple_call_fndecl (call))
4569 && DECL_EXTERNAL (decl)
4570 && TREE_PUBLIC (decl)
4571 && DECL_INITIAL (decl) == NULL)
4573 tree built_in;
4575 if (DECL_NAME (decl) == thr_num_id)
4577 /* In #pragma omp task untied omp_get_thread_num () can change
4578 during the execution of the task region. */
4579 if (untied_task)
4580 continue;
4581 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4583 else if (DECL_NAME (decl) == num_thr_id)
4584 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4585 else
4586 continue;
4588 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4589 || gimple_call_num_args (call) != 0)
4590 continue;
4592 if (flag_exceptions && !TREE_NOTHROW (decl))
4593 continue;
4595 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4596 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4597 TREE_TYPE (TREE_TYPE (built_in))))
4598 continue;
4600 gimple_call_set_fndecl (call, built_in);
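/* Sketched at source level: inside an outlined parallel body a call

     tid = omp_get_thread_num ();	// external, cannot be CSEd

   is redirected to __builtin_omp_get_thread_num (), which is declared
   const, so repeated uses within the body can fold to a single read.  */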
4605 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4606 regimplified. */
4608 static tree
4609 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4611 tree t = *tp;
4613 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4614 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4615 return t;
4617 if (TREE_CODE (t) == ADDR_EXPR)
4618 recompute_tree_invariant_for_addr_expr (t);
4620 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4621 return NULL_TREE;
4624 /* Insert a TO = FROM assignment before *GSI_P. */
4626 static void
4627 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4629 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4630 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4631 true, GSI_SAME_STMT);
4632 gimple stmt = gimple_build_assign (to, from);
4633 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4634 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4635 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4637 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4638 gimple_regimplify_operands (stmt, &gsi);
4642 /* Expand the OpenMP parallel or task directive starting at REGION. */
4644 static void
4645 expand_omp_taskreg (struct omp_region *region)
4647 basic_block entry_bb, exit_bb, new_bb;
4648 struct function *child_cfun;
4649 tree child_fn, block, t;
4650 gimple_stmt_iterator gsi;
4651 gimple entry_stmt, stmt;
4652 edge e;
4653 vec<tree, va_gc> *ws_args;
4655 entry_stmt = last_stmt (region->entry);
4656 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4657 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4659 entry_bb = region->entry;
4660 exit_bb = region->exit;
4662 if (is_combined_parallel (region))
4663 ws_args = region->ws_args;
4664 else
4665 ws_args = NULL;
4667 if (child_cfun->cfg)
4669 /* Due to inlining, it may happen that we have already outlined
4670 the region, in which case all we need to do is make the
4671 sub-graph unreachable and emit the parallel call. */
4672 edge entry_succ_e, exit_succ_e;
4674 entry_succ_e = single_succ_edge (entry_bb);
4676 gsi = gsi_last_bb (entry_bb);
4677 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4678 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4679 gsi_remove (&gsi, true);
4681 new_bb = entry_bb;
4682 if (exit_bb)
4684 exit_succ_e = single_succ_edge (exit_bb);
4685 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4687 remove_edge_and_dominated_blocks (entry_succ_e);
4689 else
4691 unsigned srcidx, dstidx, num;
4693 /* If the parallel region needs data sent from the parent
4694 function, then the very first statement (except possible
4695 tree profile counter updates) of the parallel body
4696 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4697 &.OMP_DATA_O is passed as an argument to the child function,
4698 we need to replace it with the argument as seen by the child
4699 function.
4701 In most cases, this will end up being the identity assignment
4702 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4703 a function call that has been inlined, the original PARM_DECL
4704 .OMP_DATA_I may have been converted into a different local
4705 variable, in which case we need to keep the assignment. */
4706 if (gimple_omp_taskreg_data_arg (entry_stmt))
4708 basic_block entry_succ_bb = single_succ (entry_bb);
4709 tree arg, narg;
4710 gimple parcopy_stmt = NULL;
4712 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4714 gimple stmt;
4716 gcc_assert (!gsi_end_p (gsi));
4717 stmt = gsi_stmt (gsi);
4718 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4719 continue;
4721 if (gimple_num_ops (stmt) == 2)
4723 tree arg = gimple_assign_rhs1 (stmt);
4725 /* We ignore the subcode because we're
4726 effectively doing a STRIP_NOPS. */
4728 if (TREE_CODE (arg) == ADDR_EXPR
4729 && TREE_OPERAND (arg, 0)
4730 == gimple_omp_taskreg_data_arg (entry_stmt))
4732 parcopy_stmt = stmt;
4733 break;
4738 gcc_assert (parcopy_stmt != NULL);
4739 arg = DECL_ARGUMENTS (child_fn);
4741 if (!gimple_in_ssa_p (cfun))
4743 if (gimple_assign_lhs (parcopy_stmt) == arg)
4744 gsi_remove (&gsi, true);
4745 else
4747 /* ?? Is setting the subcode really necessary ?? */
4748 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4749 gimple_assign_set_rhs1 (parcopy_stmt, arg);
4752 else
4754 /* If we are in ssa form, we must load the value from the default
4755 definition of the argument. That definition should not exist yet,
4756 since the argument is not used uninitialized. */
4757 gcc_assert (ssa_default_def (cfun, arg) == NULL);
4758 narg = make_ssa_name (arg, gimple_build_nop ());
4759 set_ssa_default_def (cfun, arg, narg);
4760 /* ?? Is setting the subcode really necessary ?? */
4761 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
4762 gimple_assign_set_rhs1 (parcopy_stmt, narg);
4763 update_stmt (parcopy_stmt);
4767 /* Declare local variables needed in CHILD_CFUN. */
4768 block = DECL_INITIAL (child_fn);
4769 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
4770 /* The gimplifier could record temporaries in the parallel/task block
4771 rather than in the containing function's local_decls chain,
4772 which would mean cgraph missed finalizing them. Do it now. */
4773 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
4774 if (TREE_CODE (t) == VAR_DECL
4775 && TREE_STATIC (t)
4776 && !DECL_EXTERNAL (t))
4777 varpool_finalize_decl (t);
4778 DECL_SAVED_TREE (child_fn) = NULL;
4779 /* We'll create a CFG for child_fn, so no gimple body is needed. */
4780 gimple_set_body (child_fn, NULL);
4781 TREE_USED (block) = 1;
4783 /* Reset DECL_CONTEXT on function arguments. */
4784 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
4785 DECL_CONTEXT (t) = child_fn;
4787 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
4788 so that it can be moved to the child function. */
4789 gsi = gsi_last_bb (entry_bb);
4790 stmt = gsi_stmt (gsi);
4791 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
4792 || gimple_code (stmt) == GIMPLE_OMP_TASK));
4793 gsi_remove (&gsi, true);
4794 e = split_block (entry_bb, stmt);
4795 entry_bb = e->dest;
4796 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4798 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
4799 if (exit_bb)
4801 gsi = gsi_last_bb (exit_bb);
4802 gcc_assert (!gsi_end_p (gsi)
4803 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4804 stmt = gimple_build_return (NULL);
4805 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4806 gsi_remove (&gsi, true);
4809 /* Move the parallel region into CHILD_CFUN. */
4811 if (gimple_in_ssa_p (cfun))
4813 init_tree_ssa (child_cfun);
4814 init_ssa_operands (child_cfun);
4815 child_cfun->gimple_df->in_ssa_p = true;
4816 block = NULL_TREE;
4818 else
4819 block = gimple_block (entry_stmt);
4821 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
4822 if (exit_bb)
4823 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
4824 /* When the OMP expansion process cannot guarantee an up-to-date
4825 loop tree, arrange for the child function to fix up its loops. */
4826 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
4827 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
4829 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
4830 num = vec_safe_length (child_cfun->local_decls);
4831 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
4833 t = (*child_cfun->local_decls)[srcidx];
4834 if (DECL_CONTEXT (t) == cfun->decl)
4835 continue;
4836 if (srcidx != dstidx)
4837 (*child_cfun->local_decls)[dstidx] = t;
4838 dstidx++;
4840 if (dstidx != num)
4841 vec_safe_truncate (child_cfun->local_decls, dstidx);
4843 /* Inform the callgraph about the new function. */
4844 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
4845 cgraph_add_new_function (child_fn, true);
4847 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4848 fixed in a following pass. */
4849 push_cfun (child_cfun);
4850 if (optimize)
4851 optimize_omp_library_calls (entry_stmt);
4852 rebuild_cgraph_edges ();
4854 /* Some EH regions might become dead, see PR34608. If
4855 pass_cleanup_cfg isn't the first pass to happen with the
4856 new child, these dead EH edges might cause problems.
4857 Clean them up now. */
4858 if (flag_exceptions)
4860 basic_block bb;
4861 bool changed = false;
4863 FOR_EACH_BB_FN (bb, cfun)
4864 changed |= gimple_purge_dead_eh_edges (bb);
4865 if (changed)
4866 cleanup_tree_cfg ();
4868 if (gimple_in_ssa_p (cfun))
4869 update_ssa (TODO_update_ssa);
4870 pop_cfun ();
4873 /* Emit a library call to launch the children threads. */
4874 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
4875 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4876 else
4877 expand_task_call (new_bb, entry_stmt);
4878 if (gimple_in_ssa_p (cfun))
4879 update_ssa (TODO_update_ssa_only_virtuals);
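/* End to end, the transformation performed here looks roughly like the
   following (all names illustrative, and the data layout depends on
   the clauses):

     #pragma omp parallel shared(a)
     a++;

   becomes

     struct .omp_data_s .omp_data_o;
     .omp_data_o.a = &a;			// sender side
     GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

     void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
     { (*.omp_data_i->a)++; }			// the moved region  */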
4883 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4884 of the combined collapse > 1 loop constructs, generate code like:
4885 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4886 if (cond3 is <)
4887 adj = STEP3 - 1;
4888 else
4889 adj = STEP3 + 1;
4890 count3 = (adj + N32 - N31) / STEP3;
4891 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4892 if (cond2 is <)
4893 adj = STEP2 - 1;
4894 else
4895 adj = STEP2 + 1;
4896 count2 = (adj + N22 - N21) / STEP2;
4897 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4898 if (cond1 is <)
4899 adj = STEP1 - 1;
4900 else
4901 adj = STEP1 + 1;
4902 count1 = (adj + N12 - N11) / STEP1;
4903 count = count1 * count2 * count3;
4904 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4905 count = 0;
4906 and set ZERO_ITER_BB to that bb. If this isn't the outermost
4907 of the combined loop constructs, just initialize the COUNTS array
4908 from the _looptemp_ clauses. */
4910 /* NOTE: It *could* be better to moosh all of the BBs together,
4911 creating one larger BB with all the computation and the unexpected
4912 jump at the end. I.e.
4914 bool zero3, zero2, zero1, zero;
4916 zero3 = N32 c3 N31;
4917 count3 = (N32 - N31) /[cl] STEP3;
4918 zero2 = N22 c2 N21;
4919 count2 = (N22 - N21) /[cl] STEP2;
4920 zero1 = N12 c1 N11;
4921 count1 = (N12 - N11) /[cl] STEP1;
4922 zero = zero3 || zero2 || zero1;
4923 count = count1 * count2 * count3;
4924 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4926 After all, we expect the zero=false, and thus we expect to have to
4927 evaluate all of the comparison expressions, so short-circuiting
4928 oughtn't be a win. Since the condition isn't protecting a
4929 denominator, we're not concerned about divide-by-zero, so we can
4930 fully evaluate count even if a numerator turned out to be wrong.
4932 It seems like putting this all together would create much better
4933 scheduling opportunities, and less pressure on the chip's branch
4934 predictor. */
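/* A worked instance of the count computation above, as a sketch: for
   "for (i = 0; i < 10; i += 3)" the condition is <, so adj = STEP - 1
   = 2 and count = (adj + N2 - N1) / STEP = (2 + 10 - 0) / 3 = 4,
   matching the iterations i = 0, 3, 6, 9.  */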
4936 static void
4937 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4938 basic_block &entry_bb, tree *counts,
4939 basic_block &zero_iter_bb, int &first_zero_iter,
4940 basic_block &l2_dom_bb)
4942 tree t, type = TREE_TYPE (fd->loop.v);
4943 gimple stmt;
4944 edge e, ne;
4945 int i;
4947 /* Collapsed loops need work for expansion into SSA form. */
4948 gcc_assert (!gimple_in_ssa_p (cfun));
4950 if (gimple_omp_for_combined_into_p (fd->for_stmt)
4951 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
4953 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
4954 isn't supposed to be handled, as the inner loop doesn't
4955 use it. */
4956 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
4957 OMP_CLAUSE__LOOPTEMP_);
4958 gcc_assert (innerc);
4959 for (i = 0; i < fd->collapse; i++)
4961 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
4962 OMP_CLAUSE__LOOPTEMP_);
4963 gcc_assert (innerc);
4964 if (i)
4965 counts[i] = OMP_CLAUSE_DECL (innerc);
4966 else
4967 counts[0] = NULL_TREE;
4969 return;
4972 for (i = 0; i < fd->collapse; i++)
4974 tree itype = TREE_TYPE (fd->loops[i].v);
4976 if (SSA_VAR_P (fd->loop.n2)
4977 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
4978 fold_convert (itype, fd->loops[i].n1),
4979 fold_convert (itype, fd->loops[i].n2)))
4980 == NULL_TREE || !integer_onep (t)))
4982 tree n1, n2;
4983 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
4984 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
4985 true, GSI_SAME_STMT);
4986 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
4987 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
4988 true, GSI_SAME_STMT);
4989 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
4990 NULL_TREE, NULL_TREE);
4991 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4992 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4993 expand_omp_regimplify_p, NULL, NULL)
4994 || walk_tree (gimple_cond_rhs_ptr (stmt),
4995 expand_omp_regimplify_p, NULL, NULL))
4997 *gsi = gsi_for_stmt (stmt);
4998 gimple_regimplify_operands (stmt, gsi);
5000 e = split_block (entry_bb, stmt);
5001 if (zero_iter_bb == NULL)
5003 first_zero_iter = i;
5004 zero_iter_bb = create_empty_bb (entry_bb);
5005 if (current_loops)
5006 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5007 *gsi = gsi_after_labels (zero_iter_bb);
5008 stmt = gimple_build_assign (fd->loop.n2,
5009 build_zero_cst (type));
5010 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5011 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5012 entry_bb);
5014 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5015 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5016 e->flags = EDGE_TRUE_VALUE;
5017 e->probability = REG_BR_PROB_BASE - ne->probability;
5018 if (l2_dom_bb == NULL)
5019 l2_dom_bb = entry_bb;
5020 entry_bb = e->dest;
5021 *gsi = gsi_last_bb (entry_bb);
5024 if (POINTER_TYPE_P (itype))
5025 itype = signed_type_for (itype);
5026 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5027 ? -1 : 1));
5028 t = fold_build2 (PLUS_EXPR, itype,
5029 fold_convert (itype, fd->loops[i].step), t);
5030 t = fold_build2 (PLUS_EXPR, itype, t,
5031 fold_convert (itype, fd->loops[i].n2));
5032 t = fold_build2 (MINUS_EXPR, itype, t,
5033 fold_convert (itype, fd->loops[i].n1));
5034 /* ?? We could probably use CEIL_DIV_EXPR instead of
5035 TRUNC_DIV_EXPR and adjust by hand, unless we can't
5036 generate the same code in the end because generically we
5037 don't know that the values involved must be negative for
5038 GT. ?? */
5039 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5040 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5041 fold_build1 (NEGATE_EXPR, itype, t),
5042 fold_build1 (NEGATE_EXPR, itype,
5043 fold_convert (itype,
5044 fd->loops[i].step)));
5045 else
5046 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5047 fold_convert (itype, fd->loops[i].step));
5048 t = fold_convert (type, t);
5049 if (TREE_CODE (t) == INTEGER_CST)
5050 counts[i] = t;
5051 else
5053 counts[i] = create_tmp_reg (type, ".count");
5054 expand_omp_build_assign (gsi, counts[i], t);
5056 if (SSA_VAR_P (fd->loop.n2))
5058 if (i == 0)
5059 t = counts[0];
5060 else
5061 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5062 expand_omp_build_assign (gsi, fd->loop.n2, t);
5068 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5069 T = V;
5070 V3 = N31 + (T % count3) * STEP3;
5071 T = T / count3;
5072 V2 = N21 + (T % count2) * STEP2;
5073 T = T / count2;
5074 V1 = N11 + T * STEP1;
5075 if this loop doesn't have an inner loop construct combined with it.
5076 If it does have an inner loop construct combined with it and the
5077 iteration count isn't known constant, store values from counts array
5078 into its _looptemp_ temporaries instead. */
5080 static void
5081 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5082 tree *counts, gimple inner_stmt, tree startvar)
5084 int i;
5085 if (gimple_omp_for_combined_p (fd->for_stmt))
5087 /* If fd->loop.n2 is constant, then no propagation of the counts
5088 is needed, they are constant. */
5089 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5090 return;
5092 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5093 ? gimple_omp_parallel_clauses (inner_stmt)
5094 : gimple_omp_for_clauses (inner_stmt);
5095 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
5096 isn't supposed to be handled, as the inner loop doesn't
5097 use it. */
5098 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5099 gcc_assert (innerc);
5100 for (i = 0; i < fd->collapse; i++)
5102 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5103 OMP_CLAUSE__LOOPTEMP_);
5104 gcc_assert (innerc);
5105 if (i)
5107 tree tem = OMP_CLAUSE_DECL (innerc);
5108 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5109 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5110 false, GSI_CONTINUE_LINKING);
5111 gimple stmt = gimple_build_assign (tem, t);
5112 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5115 return;
5118 tree type = TREE_TYPE (fd->loop.v);
5119 tree tem = create_tmp_reg (type, ".tem");
5120 gimple stmt = gimple_build_assign (tem, startvar);
5121 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5123 for (i = fd->collapse - 1; i >= 0; i--)
5125 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5126 itype = vtype;
5127 if (POINTER_TYPE_P (vtype))
5128 itype = signed_type_for (vtype);
5129 if (i != 0)
5130 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5131 else
5132 t = tem;
5133 t = fold_convert (itype, t);
5134 t = fold_build2 (MULT_EXPR, itype, t,
5135 fold_convert (itype, fd->loops[i].step));
5136 if (POINTER_TYPE_P (vtype))
5137 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5138 else
5139 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5140 t = force_gimple_operand_gsi (gsi, t,
5141 DECL_P (fd->loops[i].v)
5142 && TREE_ADDRESSABLE (fd->loops[i].v),
5143 NULL_TREE, false,
5144 GSI_CONTINUE_LINKING);
5145 stmt = gimple_build_assign (fd->loops[i].v, t);
5146 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5147 if (i != 0)
5149 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5150 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5151 false, GSI_CONTINUE_LINKING);
5152 stmt = gimple_build_assign (tem, t);
5153 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
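/* A worked instance of the decoding above, as a sketch: for collapse(3)
   with count3 = 4 and count2 = 5, the flat index T = 37 unwinds as

     V3 = N31 + (37 % 4) * STEP3;	// innermost offset 1
     T  = 37 / 4;			// = 9
     V2 = N21 + (9 % 5) * STEP2;	// middle offset 4
     T  = 9 / 5;			// = 1
     V1 = N11 + 1 * STEP1;		// outermost offset 1  */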
5159 /* Helper function for expand_omp_for_*. Generate code like:
5160 L10:
5161 V3 += STEP3;
5162 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5163 L11:
5164 V3 = N31;
5165 V2 += STEP2;
5166 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5167 L12:
5168 V2 = N21;
5169 V1 += STEP1;
5170 goto BODY_BB; */
5172 static basic_block
5173 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5174 basic_block body_bb)
5176 basic_block last_bb, bb, collapse_bb = NULL;
5177 int i;
5178 gimple_stmt_iterator gsi;
5179 edge e;
5180 tree t;
5181 gimple stmt;
5183 last_bb = cont_bb;
5184 for (i = fd->collapse - 1; i >= 0; i--)
5186 tree vtype = TREE_TYPE (fd->loops[i].v);
5188 bb = create_empty_bb (last_bb);
5189 if (current_loops)
5190 add_bb_to_loop (bb, last_bb->loop_father);
5191 gsi = gsi_start_bb (bb);
5193 if (i < fd->collapse - 1)
5195 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5196 e->probability = REG_BR_PROB_BASE / 8;
5198 t = fd->loops[i + 1].n1;
5199 t = force_gimple_operand_gsi (&gsi, t,
5200 DECL_P (fd->loops[i + 1].v)
5201 && TREE_ADDRESSABLE (fd->loops[i
5202 + 1].v),
5203 NULL_TREE, false,
5204 GSI_CONTINUE_LINKING);
5205 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5206 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5208 else
5209 collapse_bb = bb;
5211 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5213 if (POINTER_TYPE_P (vtype))
5214 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5215 else
5216 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5217 t = force_gimple_operand_gsi (&gsi, t,
5218 DECL_P (fd->loops[i].v)
5219 && TREE_ADDRESSABLE (fd->loops[i].v),
5220 NULL_TREE, false, GSI_CONTINUE_LINKING);
5221 stmt = gimple_build_assign (fd->loops[i].v, t);
5222 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5224 if (i > 0)
5226 t = fd->loops[i].n2;
5227 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5228 false, GSI_CONTINUE_LINKING);
5229 tree v = fd->loops[i].v;
5230 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5231 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5232 false, GSI_CONTINUE_LINKING);
5233 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5234 stmt = gimple_build_cond_empty (t);
5235 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5236 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5237 e->probability = REG_BR_PROB_BASE * 7 / 8;
5239 else
5240 make_edge (bb, body_bb, EDGE_FALLTHRU);
5241 last_bb = bb;
5244 return collapse_bb;
5248 /* A subroutine of expand_omp_for. Generate code for a parallel
5249 loop with any schedule. Given parameters:
5251 for (V = N1; V cond N2; V += STEP) BODY;
5253 where COND is "<" or ">", we generate pseudocode
5255 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5256 if (more) goto L0; else goto L3;
5258 V = istart0;
5259 iend = iend0;
5261 BODY;
5262 V += STEP;
5263 if (V cond iend) goto L1; else goto L2;
5265 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5268 If this is a combined omp parallel loop, instead of the call to
5269 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5270 If this is gimple_omp_for_combined_p loop, then instead of assigning
5271 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5272 inner GIMPLE_OMP_FOR and V += STEP; and
5273 if (V cond iend) goto L1; else goto L2; are removed.
5275 For collapsed loops, given parameters:
5276 collapse(3)
5277 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5278 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5279 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5280 BODY;
5282 we generate pseudocode
5284 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5285 if (cond3 is <)
5286 adj = STEP3 - 1;
5287 else
5288 adj = STEP3 + 1;
5289 count3 = (adj + N32 - N31) / STEP3;
5290 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5291 if (cond2 is <)
5292 adj = STEP2 - 1;
5293 else
5294 adj = STEP2 + 1;
5295 count2 = (adj + N22 - N21) / STEP2;
5296 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5297 if (cond1 is <)
5298 adj = STEP1 - 1;
5299 else
5300 adj = STEP1 + 1;
5301 count1 = (adj + N12 - N11) / STEP1;
5302 count = count1 * count2 * count3;
5303 goto Z1;
5305 count = 0;
5307 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5308 if (more) goto L0; else goto L3;
5310 V = istart0;
5311 T = V;
5312 V3 = N31 + (T % count3) * STEP3;
5313 T = T / count3;
5314 V2 = N21 + (T % count2) * STEP2;
5315 T = T / count2;
5316 V1 = N11 + T * STEP1;
5317 iend = iend0;
5319 BODY;
5320 V += 1;
5321 if (V < iend) goto L10; else goto L2;
5322 L10:
5323 V3 += STEP3;
5324 if (V3 cond3 N32) goto L1; else goto L11;
5325 L11:
5326 V3 = N31;
5327 V2 += STEP2;
5328 if (V2 cond2 N22) goto L1; else goto L12;
5329 L12:
5330 V2 = N21;
5331 V1 += STEP1;
5332 goto L1;
5334 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5339 static void
5340 expand_omp_for_generic (struct omp_region *region,
5341 struct omp_for_data *fd,
5342 enum built_in_function start_fn,
5343 enum built_in_function next_fn,
5344 gimple inner_stmt)
5346 tree type, istart0, iend0, iend;
5347 tree t, vmain, vback, bias = NULL_TREE;
5348 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5349 basic_block l2_bb = NULL, l3_bb = NULL;
5350 gimple_stmt_iterator gsi;
5351 gimple stmt;
5352 bool in_combined_parallel = is_combined_parallel (region);
5353 bool broken_loop = region->cont == NULL;
5354 edge e, ne;
5355 tree *counts = NULL;
5356 int i;
5358 gcc_assert (!broken_loop || !in_combined_parallel);
5359 gcc_assert (fd->iter_type == long_integer_type_node
5360 || !in_combined_parallel);
5362 type = TREE_TYPE (fd->loop.v);
5363 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5364 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5365 TREE_ADDRESSABLE (istart0) = 1;
5366 TREE_ADDRESSABLE (iend0) = 1;
5368 /* See if we need to bias by LLONG_MIN. */
5369 if (fd->iter_type == long_long_unsigned_type_node
5370 && TREE_CODE (type) == INTEGER_TYPE
5371 && !TYPE_UNSIGNED (type))
5373 tree n1, n2;
5375 if (fd->loop.cond_code == LT_EXPR)
5377 n1 = fd->loop.n1;
5378 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5380 else
5382 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5383 n2 = fd->loop.n1;
5385 if (TREE_CODE (n1) != INTEGER_CST
5386 || TREE_CODE (n2) != INTEGER_CST
5387 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5388 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
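/* For instance (a sketch): with a signed long long iteration variable,
   adding (unsigned long long) LLONG_MIN as BIAS maps [LLONG_MIN,
   LLONG_MAX] monotonically onto [0, ULLONG_MAX], so the runtime's
   unsigned comparisons still order the iterations correctly; the same
   BIAS is subtracted again when istart0/iend0 are read back below.  */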
5391 entry_bb = region->entry;
5392 cont_bb = region->cont;
5393 collapse_bb = NULL;
5394 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5395 gcc_assert (broken_loop
5396 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5397 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5398 l1_bb = single_succ (l0_bb);
5399 if (!broken_loop)
5401 l2_bb = create_empty_bb (cont_bb);
5402 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5403 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5405 else
5406 l2_bb = NULL;
5407 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5408 exit_bb = region->exit;
5410 gsi = gsi_last_bb (entry_bb);
5412 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5413 if (fd->collapse > 1)
5415 int first_zero_iter = -1;
5416 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5418 counts = XALLOCAVEC (tree, fd->collapse);
5419 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5420 zero_iter_bb, first_zero_iter,
5421 l2_dom_bb);
5423 if (zero_iter_bb)
5425 /* Some counts[i] vars might be uninitialized if
5426 some loop has zero iterations. But the body shouldn't
5427 be executed in that case, so just avoid uninit warnings. */
5428 for (i = first_zero_iter; i < fd->collapse; i++)
5429 if (SSA_VAR_P (counts[i]))
5430 TREE_NO_WARNING (counts[i]) = 1;
5431 gsi_prev (&gsi);
5432 e = split_block (entry_bb, gsi_stmt (gsi));
5433 entry_bb = e->dest;
5434 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5435 gsi = gsi_last_bb (entry_bb);
5436 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5437 get_immediate_dominator (CDI_DOMINATORS,
5438 zero_iter_bb));
5441 if (in_combined_parallel)
5443 /* In a combined parallel loop, emit a call to
5444 GOMP_loop_foo_next. */
5445 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5446 build_fold_addr_expr (istart0),
5447 build_fold_addr_expr (iend0));
5449 else
5451 tree t0, t1, t2, t3, t4;
5452 /* If this is not a combined parallel loop, emit a call to
5453 GOMP_loop_foo_start in ENTRY_BB. */
5454 t4 = build_fold_addr_expr (iend0);
5455 t3 = build_fold_addr_expr (istart0);
5456 t2 = fold_convert (fd->iter_type, fd->loop.step);
5457 t1 = fd->loop.n2;
5458 t0 = fd->loop.n1;
5459 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5461 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5462 OMP_CLAUSE__LOOPTEMP_);
5463 gcc_assert (innerc);
5464 t0 = OMP_CLAUSE_DECL (innerc);
5465 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5466 OMP_CLAUSE__LOOPTEMP_);
5467 gcc_assert (innerc);
5468 t1 = OMP_CLAUSE_DECL (innerc);
5470 if (POINTER_TYPE_P (TREE_TYPE (t0))
5471 && TYPE_PRECISION (TREE_TYPE (t0))
5472 != TYPE_PRECISION (fd->iter_type))
5474 /* Avoid casting pointers to an integer of a different size. */
5475 tree itype = signed_type_for (type);
5476 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5477 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5479 else
5481 t1 = fold_convert (fd->iter_type, t1);
5482 t0 = fold_convert (fd->iter_type, t0);
5484 if (bias)
5486 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5487 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5489 if (fd->iter_type == long_integer_type_node)
5491 if (fd->chunk_size)
5493 t = fold_convert (fd->iter_type, fd->chunk_size);
5494 t = build_call_expr (builtin_decl_explicit (start_fn),
5495 6, t0, t1, t2, t, t3, t4);
5497 else
5498 t = build_call_expr (builtin_decl_explicit (start_fn),
5499 5, t0, t1, t2, t3, t4);
5501 else
5503 tree t5;
5504 tree c_bool_type;
5505 tree bfn_decl;
5507 /* The GOMP_loop_ull_*start functions have an additional boolean
5508 argument, true for < loops and false for > loops.
5509 In Fortran, the C bool type can be different from
5510 boolean_type_node. */
5511 bfn_decl = builtin_decl_explicit (start_fn);
5512 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5513 t5 = build_int_cst (c_bool_type,
5514 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5515 if (fd->chunk_size)
5517 tree bfn_decl = builtin_decl_explicit (start_fn);
5518 t = fold_convert (fd->iter_type, fd->chunk_size);
5519 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5521 else
5522 t = build_call_expr (builtin_decl_explicit (start_fn),
5523 6, t5, t0, t1, t2, t3, t4);
5526 if (TREE_TYPE (t) != boolean_type_node)
5527 t = fold_build2 (NE_EXPR, boolean_type_node,
5528 t, build_int_cst (TREE_TYPE (t), 0));
5529 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5530 true, GSI_SAME_STMT);
5531 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5533 /* Remove the GIMPLE_OMP_FOR statement. */
5534 gsi_remove (&gsi, true);
5536 /* Iteration setup for sequential loop goes in L0_BB. */
5537 tree startvar = fd->loop.v;
5538 tree endvar = NULL_TREE;
5540 if (gimple_omp_for_combined_p (fd->for_stmt))
5542 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5543 && gimple_omp_for_kind (inner_stmt)
5544 == GF_OMP_FOR_KIND_SIMD);
5545 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5546 OMP_CLAUSE__LOOPTEMP_);
5547 gcc_assert (innerc);
5548 startvar = OMP_CLAUSE_DECL (innerc);
5549 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5550 OMP_CLAUSE__LOOPTEMP_);
5551 gcc_assert (innerc);
5552 endvar = OMP_CLAUSE_DECL (innerc);
5555 gsi = gsi_start_bb (l0_bb);
5556 t = istart0;
5557 if (bias)
5558 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5559 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5560 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5561 t = fold_convert (TREE_TYPE (startvar), t);
5562 t = force_gimple_operand_gsi (&gsi, t,
5563 DECL_P (startvar)
5564 && TREE_ADDRESSABLE (startvar),
5565 NULL_TREE, false, GSI_CONTINUE_LINKING);
5566 stmt = gimple_build_assign (startvar, t);
5567 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5569 t = iend0;
5570 if (bias)
5571 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5572 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5573 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5574 t = fold_convert (TREE_TYPE (startvar), t);
5575 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5576 false, GSI_CONTINUE_LINKING);
5577 if (endvar)
5579 stmt = gimple_build_assign (endvar, iend);
5580 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5581 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5582 stmt = gimple_build_assign (fd->loop.v, iend);
5583 else
5584 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, iend,
5585 NULL_TREE);
5586 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5588 if (fd->collapse > 1)
5589 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5591 if (!broken_loop)
5593 /* Code to control the increment and predicate for the sequential
5594 loop goes in the CONT_BB. */
5595 gsi = gsi_last_bb (cont_bb);
5596 stmt = gsi_stmt (gsi);
5597 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5598 vmain = gimple_omp_continue_control_use (stmt);
5599 vback = gimple_omp_continue_control_def (stmt);
5601 if (!gimple_omp_for_combined_p (fd->for_stmt))
5603 if (POINTER_TYPE_P (type))
5604 t = fold_build_pointer_plus (vmain, fd->loop.step);
5605 else
5606 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5607 t = force_gimple_operand_gsi (&gsi, t,
5608 DECL_P (vback)
5609 && TREE_ADDRESSABLE (vback),
5610 NULL_TREE, true, GSI_SAME_STMT);
5611 stmt = gimple_build_assign (vback, t);
5612 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5614 t = build2 (fd->loop.cond_code, boolean_type_node,
5615 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5616 iend);
5617 stmt = gimple_build_cond_empty (t);
5618 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5621 /* Remove GIMPLE_OMP_CONTINUE. */
5622 gsi_remove (&gsi, true);
5624 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5625 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5627 /* Emit code to get the next parallel iteration in L2_BB. */
5628 gsi = gsi_start_bb (l2_bb);
5630 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5631 build_fold_addr_expr (istart0),
5632 build_fold_addr_expr (iend0));
5633 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5634 false, GSI_CONTINUE_LINKING);
5635 if (TREE_TYPE (t) != boolean_type_node)
5636 t = fold_build2 (NE_EXPR, boolean_type_node,
5637 t, build_int_cst (TREE_TYPE (t), 0));
5638 stmt = gimple_build_cond_empty (t);
5639 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5642 /* Add the loop cleanup function. */
5643 gsi = gsi_last_bb (exit_bb);
5644 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5645 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5646 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5647 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5648 else
5649 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5650 stmt = gimple_build_call (t, 0);
5651 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5652 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5653 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5654 gsi_remove (&gsi, true);
5656 /* Connect the new blocks. */
5657 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5658 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5660 if (!broken_loop)
5662 gimple_seq phis;
5664 e = find_edge (cont_bb, l3_bb);
5665 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5667 phis = phi_nodes (l3_bb);
5668 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5670 gimple phi = gsi_stmt (gsi);
5671 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5672 PHI_ARG_DEF_FROM_EDGE (phi, e));
5674 remove_edge (e);
5676 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5677 if (current_loops)
5678 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5679 e = find_edge (cont_bb, l1_bb);
5680 if (gimple_omp_for_combined_p (fd->for_stmt))
5682 remove_edge (e);
5683 e = NULL;
5685 else if (fd->collapse > 1)
5687 remove_edge (e);
5688 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5690 else
5691 e->flags = EDGE_TRUE_VALUE;
5692 if (e)
5694 e->probability = REG_BR_PROB_BASE * 7 / 8;
5695 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5697 else
5699 e = find_edge (cont_bb, l2_bb);
5700 e->flags = EDGE_FALLTHRU;
5702 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5704 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5705 recompute_dominator (CDI_DOMINATORS, l2_bb));
5706 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5707 recompute_dominator (CDI_DOMINATORS, l3_bb));
5708 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5709 recompute_dominator (CDI_DOMINATORS, l0_bb));
5710 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5711 recompute_dominator (CDI_DOMINATORS, l1_bb));
5713 struct loop *outer_loop = alloc_loop ();
5714 outer_loop->header = l0_bb;
5715 outer_loop->latch = l2_bb;
5716 add_loop (outer_loop, l0_bb->loop_father);
5718 if (!gimple_omp_for_combined_p (fd->for_stmt))
5720 struct loop *loop = alloc_loop ();
5721 loop->header = l1_bb;
5722 /* The loop may have multiple latches. */
5723 add_loop (loop, outer_loop);
5729 /* A subroutine of expand_omp_for. Generate code for a parallel
5730 loop with static schedule and no specified chunk size. Given
5731 parameters:
5733 for (V = N1; V cond N2; V += STEP) BODY;
5735 where COND is "<" or ">", we generate pseudocode
5737 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5738 if (cond is <)
5739 adj = STEP - 1;
5740 else
5741 adj = STEP + 1;
5742 if ((__typeof (V)) -1 > 0 && cond is >)
5743 n = -(adj + N2 - N1) / -STEP;
5744 else
5745 n = (adj + N2 - N1) / STEP;
5746 q = n / nthreads;
5747 tt = n % nthreads;
5748 if (threadid < tt) goto L3; else goto L4;
5749 L3:
5750 tt = 0;
5751 q = q + 1;
5752 L4:
5753 s0 = q * threadid + tt;
5754 e0 = s0 + q;
5755 V = s0 * STEP + N1;
5756 if (s0 >= e0) goto L2; else goto L0;
5757 L0:
5758 e = e0 * STEP + N1;
5759 L1:
5760 BODY;
5761 V += STEP;
5762 if (V cond e) goto L1;
5763 L2:
5764 */
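/* An illustrative walk-through (numbers invented for exposition): with
   n = 10 iterations and nthreads = 4, q = 2 and tt = 2, so threads 0
   and 1 branch to L3 and take q + 1 = 3 iterations each ([0,3) and
   [3,6)), while threads 2 and 3 take q = 2 each ([6,8) and [8,10)).
   Every iteration is assigned exactly once and the per-thread counts
   differ by at most one.  */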
5766 static void
5767 expand_omp_for_static_nochunk (struct omp_region *region,
5768 struct omp_for_data *fd,
5769 gimple inner_stmt)
5771 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
5772 tree type, itype, vmain, vback;
5773 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
5774 basic_block body_bb, cont_bb, collapse_bb = NULL;
5775 basic_block fin_bb;
5776 gimple_stmt_iterator gsi;
5777 gimple stmt;
5778 edge ep;
5779 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
5780 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
5781 bool broken_loop = region->cont == NULL;
5782 tree *counts = NULL;
5783 tree n1, n2, step;
5785 itype = type = TREE_TYPE (fd->loop.v);
5786 if (POINTER_TYPE_P (type))
5787 itype = signed_type_for (type);
5789 entry_bb = region->entry;
5790 cont_bb = region->cont;
5791 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5792 fin_bb = BRANCH_EDGE (entry_bb)->dest;
5793 gcc_assert (broken_loop
5794 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
5795 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5796 body_bb = single_succ (seq_start_bb);
5797 if (!broken_loop)
5799 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5800 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5802 exit_bb = region->exit;
5804 /* Iteration space partitioning goes in ENTRY_BB. */
5805 gsi = gsi_last_bb (entry_bb);
5806 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5808 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
5810 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
5811 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
5814 if (fd->collapse > 1)
5816 int first_zero_iter = -1;
5817 basic_block l2_dom_bb = NULL;
5819 counts = XALLOCAVEC (tree, fd->collapse);
5820 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5821 fin_bb, first_zero_iter,
5822 l2_dom_bb);
5823 t = NULL_TREE;
5825 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
5826 t = integer_one_node;
5827 else
5828 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5829 fold_convert (type, fd->loop.n1),
5830 fold_convert (type, fd->loop.n2));
5831 if (fd->collapse == 1
5832 && TYPE_UNSIGNED (type)
5833 && (t == NULL_TREE || !integer_onep (t)))
5835 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5836 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
5837 true, GSI_SAME_STMT);
5838 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5839 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
5840 true, GSI_SAME_STMT);
5841 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5842 NULL_TREE, NULL_TREE);
5843 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5844 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5845 expand_omp_regimplify_p, NULL, NULL)
5846 || walk_tree (gimple_cond_rhs_ptr (stmt),
5847 expand_omp_regimplify_p, NULL, NULL))
5849 gsi = gsi_for_stmt (stmt);
5850 gimple_regimplify_operands (stmt, &gsi);
5852 ep = split_block (entry_bb, stmt);
5853 ep->flags = EDGE_TRUE_VALUE;
5854 entry_bb = ep->dest;
5855 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5856 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
5857 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
5858 if (gimple_in_ssa_p (cfun))
5860 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5861 for (gsi = gsi_start_phis (fin_bb);
5862 !gsi_end_p (gsi); gsi_next (&gsi))
5864 gimple phi = gsi_stmt (gsi);
5865 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5866 ep, UNKNOWN_LOCATION);
5869 gsi = gsi_last_bb (entry_bb);
5872 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
5873 t = fold_convert (itype, t);
5874 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5875 true, GSI_SAME_STMT);
5877 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
5878 t = fold_convert (itype, t);
5879 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5880 true, GSI_SAME_STMT);
5882 n1 = fd->loop.n1;
5883 n2 = fd->loop.n2;
5884 step = fd->loop.step;
5885 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5887 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5888 OMP_CLAUSE__LOOPTEMP_);
5889 gcc_assert (innerc);
5890 n1 = OMP_CLAUSE_DECL (innerc);
5891 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5892 OMP_CLAUSE__LOOPTEMP_);
5893 gcc_assert (innerc);
5894 n2 = OMP_CLAUSE_DECL (innerc);
5896 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
5897 true, NULL_TREE, true, GSI_SAME_STMT);
5898 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
5899 true, NULL_TREE, true, GSI_SAME_STMT);
5900 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
5901 true, NULL_TREE, true, GSI_SAME_STMT);
5903 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
5904 t = fold_build2 (PLUS_EXPR, itype, step, t);
5905 t = fold_build2 (PLUS_EXPR, itype, t, n2);
5906 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
5907 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5908 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5909 fold_build1 (NEGATE_EXPR, itype, t),
5910 fold_build1 (NEGATE_EXPR, itype, step));
5911 else
5912 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
5913 t = fold_convert (itype, t);
5914 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
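/* A worked instance of this count computation (values invented): for
   (V = 10; V > 0; V -= 3) with unsigned V, cond_code is GT_EXPR, so
   adj = STEP + 1 = -2 and t = adj + N2 - N1 = -2 + 0 - 10 = -12 in the
   unsigned type.  The TYPE_UNSIGNED && GT_EXPR branch negates both
   operands so the division sees the true magnitudes:
   n = -t / -STEP = 12 / 3 = 4, matching the iterations V = 10, 7, 4, 1.  */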
5916 q = create_tmp_reg (itype, "q");
5917 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
5918 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5919 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
5921 tt = create_tmp_reg (itype, "tt");
5922 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
5923 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5924 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
5926 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
5927 stmt = gimple_build_cond_empty (t);
5928 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5930 second_bb = split_block (entry_bb, stmt)->dest;
5931 gsi = gsi_last_bb (second_bb);
5932 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5934 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
5935 GSI_SAME_STMT);
5936 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
5937 build_int_cst (itype, 1));
5938 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5940 third_bb = split_block (second_bb, stmt)->dest;
5941 gsi = gsi_last_bb (third_bb);
5942 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5944 t = build2 (MULT_EXPR, itype, q, threadid);
5945 t = build2 (PLUS_EXPR, itype, t, tt);
5946 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
5948 t = fold_build2 (PLUS_EXPR, itype, s0, q);
5949 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
5951 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
5952 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5954 /* Remove the GIMPLE_OMP_FOR statement. */
5955 gsi_remove (&gsi, true);
5957 /* Setup code for sequential iteration goes in SEQ_START_BB. */
5958 gsi = gsi_start_bb (seq_start_bb);
5960 tree startvar = fd->loop.v;
5961 tree endvar = NULL_TREE;
5963 if (gimple_omp_for_combined_p (fd->for_stmt))
5965 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5966 ? gimple_omp_parallel_clauses (inner_stmt)
5967 : gimple_omp_for_clauses (inner_stmt);
5968 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5969 gcc_assert (innerc);
5970 startvar = OMP_CLAUSE_DECL (innerc);
5971 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5972 OMP_CLAUSE__LOOPTEMP_);
5973 gcc_assert (innerc);
5974 endvar = OMP_CLAUSE_DECL (innerc);
5976 t = fold_convert (itype, s0);
5977 t = fold_build2 (MULT_EXPR, itype, t, step);
5978 if (POINTER_TYPE_P (type))
5979 t = fold_build_pointer_plus (n1, t);
5980 else
5981 t = fold_build2 (PLUS_EXPR, type, t, n1);
5982 t = fold_convert (TREE_TYPE (startvar), t);
5983 t = force_gimple_operand_gsi (&gsi, t,
5984 DECL_P (startvar)
5985 && TREE_ADDRESSABLE (startvar),
5986 NULL_TREE, false, GSI_CONTINUE_LINKING);
5987 stmt = gimple_build_assign (startvar, t);
5988 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5990 t = fold_convert (itype, e0);
5991 t = fold_build2 (MULT_EXPR, itype, t, step);
5992 if (POINTER_TYPE_P (type))
5993 t = fold_build_pointer_plus (n1, t);
5994 else
5995 t = fold_build2 (PLUS_EXPR, type, t, n1);
5996 t = fold_convert (TREE_TYPE (startvar), t);
5997 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5998 false, GSI_CONTINUE_LINKING);
5999 if (endvar)
6001 stmt = gimple_build_assign (endvar, e);
6002 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6003 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6004 stmt = gimple_build_assign (fd->loop.v, e);
6005 else
6006 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6007 NULL_TREE);
6008 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6010 if (fd->collapse > 1)
6011 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6013 if (!broken_loop)
6015 /* The code controlling the sequential loop replaces the
6016 GIMPLE_OMP_CONTINUE. */
6017 gsi = gsi_last_bb (cont_bb);
6018 stmt = gsi_stmt (gsi);
6019 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6020 vmain = gimple_omp_continue_control_use (stmt);
6021 vback = gimple_omp_continue_control_def (stmt);
6023 if (!gimple_omp_for_combined_p (fd->for_stmt))
6025 if (POINTER_TYPE_P (type))
6026 t = fold_build_pointer_plus (vmain, step);
6027 else
6028 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6029 t = force_gimple_operand_gsi (&gsi, t,
6030 DECL_P (vback)
6031 && TREE_ADDRESSABLE (vback),
6032 NULL_TREE, true, GSI_SAME_STMT);
6033 stmt = gimple_build_assign (vback, t);
6034 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6036 t = build2 (fd->loop.cond_code, boolean_type_node,
6037 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6038 ? t : vback, e);
6039 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6042 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6043 gsi_remove (&gsi, true);
6045 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6046 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6049 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6050 gsi = gsi_last_bb (exit_bb);
6051 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6053 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6054 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6056 gsi_remove (&gsi, true);
6058 /* Connect all the blocks. */
6059 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6060 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6061 ep = find_edge (entry_bb, second_bb);
6062 ep->flags = EDGE_TRUE_VALUE;
6063 ep->probability = REG_BR_PROB_BASE / 4;
6064 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6065 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6067 if (!broken_loop)
6069 ep = find_edge (cont_bb, body_bb);
6070 if (gimple_omp_for_combined_p (fd->for_stmt))
6072 remove_edge (ep);
6073 ep = NULL;
6075 else if (fd->collapse > 1)
6077 remove_edge (ep);
6078 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6080 else
6081 ep->flags = EDGE_TRUE_VALUE;
6082 find_edge (cont_bb, fin_bb)->flags
6083 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6086 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6087 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6088 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6090 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6091 recompute_dominator (CDI_DOMINATORS, body_bb));
6092 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6093 recompute_dominator (CDI_DOMINATORS, fin_bb));
6095 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6097 struct loop *loop = alloc_loop ();
6098 loop->header = body_bb;
6099 if (collapse_bb == NULL)
6100 loop->latch = cont_bb;
6101 add_loop (loop, body_bb->loop_father);
6106 /* A subroutine of expand_omp_for. Generate code for a parallel
6107 loop with static schedule and a specified chunk size. Given
6108 parameters:
6110 for (V = N1; V cond N2; V += STEP) BODY;
6112 where COND is "<" or ">", we generate pseudocode
6114 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6115 if (cond is <)
6116 adj = STEP - 1;
6117 else
6118 adj = STEP + 1;
6119 if ((__typeof (V)) -1 > 0 && cond is >)
6120 n = -(adj + N2 - N1) / -STEP;
6121 else
6122 n = (adj + N2 - N1) / STEP;
6123 trip = 0;
6124 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6125 here so that V is defined
6126 if the loop is not entered
6127 L0:
6128 s0 = (trip * nthreads + threadid) * CHUNK;
6129 e0 = min(s0 + CHUNK, n);
6130 if (s0 < n) goto L1; else goto L4;
6131 L1:
6132 V = s0 * STEP + N1;
6133 e = e0 * STEP + N1;
6134 L2:
6135 BODY;
6136 V += STEP;
6137 if (V cond e) goto L2; else goto L3;
6138 L3:
6139 trip += 1;
6140 goto L0;
6141 L4:
6142 */
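/* An illustrative schedule (numbers invented): with n = 10,
   nthreads = 2 and CHUNK = 3, trip 0 hands thread 0 the range [0,3)
   and thread 1 [3,6); trip 1 hands thread 0 [6,9) and thread 1 [9,10).
   On trip 2, s0 >= n for both threads, so the s0 < n test after L0
   fails and control exits to L4.  */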
6144 static void
6145 expand_omp_for_static_chunk (struct omp_region *region,
6146 struct omp_for_data *fd, gimple inner_stmt)
6148 tree n, s0, e0, e, t;
6149 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6150 tree type, itype, v_main, v_back, v_extra;
6151 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6152 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6153 gimple_stmt_iterator si;
6154 gimple stmt;
6155 edge se;
6156 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6157 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6158 bool broken_loop = region->cont == NULL;
6159 tree *counts = NULL;
6160 tree n1, n2, step;
6162 itype = type = TREE_TYPE (fd->loop.v);
6163 if (POINTER_TYPE_P (type))
6164 itype = signed_type_for (type);
6166 entry_bb = region->entry;
6167 se = split_block (entry_bb, last_stmt (entry_bb));
6168 entry_bb = se->src;
6169 iter_part_bb = se->dest;
6170 cont_bb = region->cont;
6171 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6172 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6173 gcc_assert (broken_loop
6174 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6175 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6176 body_bb = single_succ (seq_start_bb);
6177 if (!broken_loop)
6179 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6180 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6181 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6183 exit_bb = region->exit;
6185 /* Trip and adjustment setup goes in ENTRY_BB. */
6186 si = gsi_last_bb (entry_bb);
6187 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
6189 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6191 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6192 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6195 if (fd->collapse > 1)
6197 int first_zero_iter = -1;
6198 basic_block l2_dom_bb = NULL;
6200 counts = XALLOCAVEC (tree, fd->collapse);
6201 expand_omp_for_init_counts (fd, &si, entry_bb, counts,
6202 fin_bb, first_zero_iter,
6203 l2_dom_bb);
6204 t = NULL_TREE;
6206 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6207 t = integer_one_node;
6208 else
6209 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6210 fold_convert (type, fd->loop.n1),
6211 fold_convert (type, fd->loop.n2));
6212 if (fd->collapse == 1
6213 && TYPE_UNSIGNED (type)
6214 && (t == NULL_TREE || !integer_onep (t)))
6216 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6217 n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
6218 true, GSI_SAME_STMT);
6219 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6220 n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
6221 true, GSI_SAME_STMT);
6222 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6223 NULL_TREE, NULL_TREE);
6224 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6225 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6226 expand_omp_regimplify_p, NULL, NULL)
6227 || walk_tree (gimple_cond_rhs_ptr (stmt),
6228 expand_omp_regimplify_p, NULL, NULL))
6230 si = gsi_for_stmt (stmt);
6231 gimple_regimplify_operands (stmt, &si);
6233 se = split_block (entry_bb, stmt);
6234 se->flags = EDGE_TRUE_VALUE;
6235 entry_bb = se->dest;
6236 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6237 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6238 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6239 if (gimple_in_ssa_p (cfun))
6241 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6242 for (si = gsi_start_phis (fin_bb);
6243 !gsi_end_p (si); gsi_next (&si))
6245 gimple phi = gsi_stmt (si);
6246 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6247 se, UNKNOWN_LOCATION);
6250 si = gsi_last_bb (entry_bb);
6253 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6254 t = fold_convert (itype, t);
6255 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6256 true, GSI_SAME_STMT);
6258 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6259 t = fold_convert (itype, t);
6260 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6261 true, GSI_SAME_STMT);
6263 n1 = fd->loop.n1;
6264 n2 = fd->loop.n2;
6265 step = fd->loop.step;
6266 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6268 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6269 OMP_CLAUSE__LOOPTEMP_);
6270 gcc_assert (innerc);
6271 n1 = OMP_CLAUSE_DECL (innerc);
6272 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6273 OMP_CLAUSE__LOOPTEMP_);
6274 gcc_assert (innerc);
6275 n2 = OMP_CLAUSE_DECL (innerc);
6277 n1 = force_gimple_operand_gsi (&si, fold_convert (type, n1),
6278 true, NULL_TREE, true, GSI_SAME_STMT);
6279 n2 = force_gimple_operand_gsi (&si, fold_convert (itype, n2),
6280 true, NULL_TREE, true, GSI_SAME_STMT);
6281 step = force_gimple_operand_gsi (&si, fold_convert (itype, step),
6282 true, NULL_TREE, true, GSI_SAME_STMT);
6283 fd->chunk_size
6284 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
6285 true, NULL_TREE, true, GSI_SAME_STMT);
6287 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6288 t = fold_build2 (PLUS_EXPR, itype, step, t);
6289 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6290 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6291 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6292 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6293 fold_build1 (NEGATE_EXPR, itype, t),
6294 fold_build1 (NEGATE_EXPR, itype, step));
6295 else
6296 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6297 t = fold_convert (itype, t);
6298 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6299 true, GSI_SAME_STMT);
6301 trip_var = create_tmp_reg (itype, ".trip");
6302 if (gimple_in_ssa_p (cfun))
6304 trip_init = make_ssa_name (trip_var, NULL);
6305 trip_main = make_ssa_name (trip_var, NULL);
6306 trip_back = make_ssa_name (trip_var, NULL);
6308 else
6310 trip_init = trip_var;
6311 trip_main = trip_var;
6312 trip_back = trip_var;
6315 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
6316 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6318 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6319 t = fold_build2 (MULT_EXPR, itype, t, step);
6320 if (POINTER_TYPE_P (type))
6321 t = fold_build_pointer_plus (n1, t);
6322 else
6323 t = fold_build2 (PLUS_EXPR, type, t, n1);
6324 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6325 true, GSI_SAME_STMT);
6327 /* Remove the GIMPLE_OMP_FOR. */
6328 gsi_remove (&si, true);
6330 /* Iteration space partitioning goes in ITER_PART_BB. */
6331 si = gsi_last_bb (iter_part_bb);
6333 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6334 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6335 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6336 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6337 false, GSI_CONTINUE_LINKING);
6339 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6340 t = fold_build2 (MIN_EXPR, itype, t, n);
6341 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6342 false, GSI_CONTINUE_LINKING);
6344 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6345 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6347 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6348 si = gsi_start_bb (seq_start_bb);
6350 tree startvar = fd->loop.v;
6351 tree endvar = NULL_TREE;
6353 if (gimple_omp_for_combined_p (fd->for_stmt))
6355 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6356 ? gimple_omp_parallel_clauses (inner_stmt)
6357 : gimple_omp_for_clauses (inner_stmt);
6358 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6359 gcc_assert (innerc);
6360 startvar = OMP_CLAUSE_DECL (innerc);
6361 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6362 OMP_CLAUSE__LOOPTEMP_);
6363 gcc_assert (innerc);
6364 endvar = OMP_CLAUSE_DECL (innerc);
6367 t = fold_convert (itype, s0);
6368 t = fold_build2 (MULT_EXPR, itype, t, step);
6369 if (POINTER_TYPE_P (type))
6370 t = fold_build_pointer_plus (n1, t);
6371 else
6372 t = fold_build2 (PLUS_EXPR, type, t, n1);
6373 t = fold_convert (TREE_TYPE (startvar), t);
6374 t = force_gimple_operand_gsi (&si, t,
6375 DECL_P (startvar)
6376 && TREE_ADDRESSABLE (startvar),
6377 NULL_TREE, false, GSI_CONTINUE_LINKING);
6378 stmt = gimple_build_assign (startvar, t);
6379 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6381 t = fold_convert (itype, e0);
6382 t = fold_build2 (MULT_EXPR, itype, t, step);
6383 if (POINTER_TYPE_P (type))
6384 t = fold_build_pointer_plus (n1, t);
6385 else
6386 t = fold_build2 (PLUS_EXPR, type, t, n1);
6387 t = fold_convert (TREE_TYPE (startvar), t);
6388 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6389 false, GSI_CONTINUE_LINKING);
6390 if (endvar)
6392 stmt = gimple_build_assign (endvar, e);
6393 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6394 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6395 stmt = gimple_build_assign (fd->loop.v, e);
6396 else
6397 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6398 NULL_TREE);
6399 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6401 if (fd->collapse > 1)
6402 expand_omp_for_init_vars (fd, &si, counts, inner_stmt, startvar);
6404 if (!broken_loop)
6406 /* The code controlling the sequential loop goes in CONT_BB,
6407 replacing the GIMPLE_OMP_CONTINUE. */
6408 si = gsi_last_bb (cont_bb);
6409 stmt = gsi_stmt (si);
6410 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6411 v_main = gimple_omp_continue_control_use (stmt);
6412 v_back = gimple_omp_continue_control_def (stmt);
6414 if (!gimple_omp_for_combined_p (fd->for_stmt))
6416 if (POINTER_TYPE_P (type))
6417 t = fold_build_pointer_plus (v_main, step);
6418 else
6419 t = fold_build2 (PLUS_EXPR, type, v_main, step);
6420 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
6421 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6422 true, GSI_SAME_STMT);
6423 stmt = gimple_build_assign (v_back, t);
6424 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6426 t = build2 (fd->loop.cond_code, boolean_type_node,
6427 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
6428 ? t : v_back, e);
6429 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
6432 /* Remove GIMPLE_OMP_CONTINUE. */
6433 gsi_remove (&si, true);
6435 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6436 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6438 /* Trip update code goes into TRIP_UPDATE_BB. */
6439 si = gsi_start_bb (trip_update_bb);
6441 t = build_int_cst (itype, 1);
6442 t = build2 (PLUS_EXPR, itype, trip_main, t);
6443 stmt = gimple_build_assign (trip_back, t);
6444 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6447 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6448 si = gsi_last_bb (exit_bb);
6449 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
6451 t = gimple_omp_return_lhs (gsi_stmt (si));
6452 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
6454 gsi_remove (&si, true);
6456 /* Connect the new blocks. */
6457 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6458 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6460 if (!broken_loop)
6462 se = find_edge (cont_bb, body_bb);
6463 if (gimple_omp_for_combined_p (fd->for_stmt))
6465 remove_edge (se);
6466 se = NULL;
6468 else if (fd->collapse > 1)
6470 remove_edge (se);
6471 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6473 else
6474 se->flags = EDGE_TRUE_VALUE;
6475 find_edge (cont_bb, trip_update_bb)->flags
6476 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6478 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6481 if (gimple_in_ssa_p (cfun))
6483 gimple_stmt_iterator psi;
6484 gimple phi;
6485 edge re, ene;
6486 edge_var_map_vector *head;
6487 edge_var_map *vm;
6488 size_t i;
6490 gcc_assert (fd->collapse == 1 && !broken_loop);
6492 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6493 remove arguments of the phi nodes in fin_bb. We need to create
6494 appropriate phi nodes in iter_part_bb instead. */
6495 se = single_pred_edge (fin_bb);
6496 re = single_succ_edge (trip_update_bb);
6497 head = redirect_edge_var_map_vector (re);
6498 ene = single_succ_edge (entry_bb);
6500 psi = gsi_start_phis (fin_bb);
6501 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6502 gsi_next (&psi), ++i)
6504 gimple nphi;
6505 source_location locus;
6507 phi = gsi_stmt (psi);
6508 t = gimple_phi_result (phi);
6509 gcc_assert (t == redirect_edge_var_map_result (vm));
6510 nphi = create_phi_node (t, iter_part_bb);
6512 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6513 locus = gimple_phi_arg_location_from_edge (phi, se);
6515 /* A special case -- fd->loop.v is not yet computed in
7516 iter_part_bb, so we need to use v_extra instead. */
6517 if (t == fd->loop.v)
6518 t = v_extra;
6519 add_phi_arg (nphi, t, ene, locus);
6520 locus = redirect_edge_var_map_location (vm);
6521 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6523 gcc_assert (!gsi_end_p (psi) && i == head->length ());
6524 redirect_edge_var_map_clear (re);
6525 while (1)
6527 psi = gsi_start_phis (fin_bb);
6528 if (gsi_end_p (psi))
6529 break;
6530 remove_phi_node (&psi, false);
6533 /* Make phi node for trip. */
6534 phi = create_phi_node (trip_main, iter_part_bb);
6535 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6536 UNKNOWN_LOCATION);
6537 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6538 UNKNOWN_LOCATION);
6541 if (!broken_loop)
6542 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6543 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6544 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6545 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6546 recompute_dominator (CDI_DOMINATORS, fin_bb));
6547 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6548 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6549 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6550 recompute_dominator (CDI_DOMINATORS, body_bb));
6552 if (!broken_loop)
6554 struct loop *trip_loop = alloc_loop ();
6555 trip_loop->header = iter_part_bb;
6556 trip_loop->latch = trip_update_bb;
6557 add_loop (trip_loop, iter_part_bb->loop_father);
6559 if (!gimple_omp_for_combined_p (fd->for_stmt))
6561 struct loop *loop = alloc_loop ();
6562 loop->header = body_bb;
6563 if (collapse_bb == NULL)
6564 loop->latch = cont_bb;
6565 add_loop (loop, trip_loop);
6571 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
6572 loop. Given parameters:
6574 for (V = N1; V cond N2; V += STEP) BODY;
6576 where COND is "<" or ">", we generate pseudocode
6578 V = N1;
6579 goto L1;
6580 L0:
6581 BODY;
6582 V += STEP;
6583 L1:
6584 if (V cond N2) goto L0; else goto L2;
6585 L2:
6587 For collapsed loops, given parameters:
6588 collapse(3)
6589 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6590 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6591 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6592 BODY;
6594 we generate pseudocode
6596 if (cond3 is <)
6597 adj = STEP3 - 1;
6598 else
6599 adj = STEP3 + 1;
6600 count3 = (adj + N32 - N31) / STEP3;
6601 if (cond2 is <)
6602 adj = STEP2 - 1;
6603 else
6604 adj = STEP2 + 1;
6605 count2 = (adj + N22 - N21) / STEP2;
6606 if (cond1 is <)
6607 adj = STEP1 - 1;
6608 else
6609 adj = STEP1 + 1;
6610 count1 = (adj + N12 - N11) / STEP1;
6611 count = count1 * count2 * count3;
6612 V = 0;
6613 V1 = N11;
6614 V2 = N21;
6615 V3 = N31;
6616 goto L1;
6617 L0:
6618 BODY;
6619 V += 1;
6620 V3 += STEP3;
6621 V2 += (V3 cond3 N32) ? 0 : STEP2;
6622 V3 = (V3 cond3 N32) ? V3 : N31;
6623 V1 += (V2 cond2 N22) ? 0 : STEP1;
6624 V2 = (V2 cond2 N22) ? V2 : N21;
6625 L1:
6626 if (V < count) goto L0; else goto L2;
6627 L2:
6629 */
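/* The update sequence above acts as a multi-digit counter with V3 as
   the fastest digit.  For example (values invented): with all lower
   bounds 0, all steps 1 and count1 = count2 = count3 = 2, the tuple
   (V1,V2,V3) steps through 000, 001, 010, 011, 100, ... in lock-step
   with the single linear counter V = 0, 1, 2, ...  */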
6631 static void
6632 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
6634 tree type, t;
6635 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
6636 gimple_stmt_iterator gsi;
6637 gimple stmt;
6638 bool broken_loop = region->cont == NULL;
6639 edge e, ne;
6640 tree *counts = NULL;
6641 int i;
6642 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6643 OMP_CLAUSE_SAFELEN);
6644 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6645 OMP_CLAUSE__SIMDUID_);
6646 tree n1, n2;
6648 type = TREE_TYPE (fd->loop.v);
6649 entry_bb = region->entry;
6650 cont_bb = region->cont;
6651 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6652 gcc_assert (broken_loop
6653 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6654 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6655 if (!broken_loop)
6657 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6658 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6659 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6660 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6662 else
6664 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6665 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6666 l2_bb = single_succ (l1_bb);
6668 exit_bb = region->exit;
6669 l2_dom_bb = NULL;
6671 gsi = gsi_last_bb (entry_bb);
6673 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6674 /* Not needed in SSA form right now. */
6675 gcc_assert (!gimple_in_ssa_p (cfun));
6676 if (fd->collapse > 1)
6678 int first_zero_iter = -1;
6679 basic_block zero_iter_bb = l2_bb;
6681 counts = XALLOCAVEC (tree, fd->collapse);
6682 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6683 zero_iter_bb, first_zero_iter,
6684 l2_dom_bb);
6686 if (l2_dom_bb == NULL)
6687 l2_dom_bb = l1_bb;
6689 n1 = fd->loop.n1;
6690 n2 = fd->loop.n2;
6691 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6693 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6694 OMP_CLAUSE__LOOPTEMP_);
6695 gcc_assert (innerc);
6696 n1 = OMP_CLAUSE_DECL (innerc);
6697 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6698 OMP_CLAUSE__LOOPTEMP_);
6699 gcc_assert (innerc);
6700 n2 = OMP_CLAUSE_DECL (innerc);
6701 expand_omp_build_assign (&gsi, fd->loop.v,
6702 fold_convert (type, n1));
6703 if (fd->collapse > 1)
6705 gsi_prev (&gsi);
6706 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
6707 gsi_next (&gsi);
6710 else
6712 expand_omp_build_assign (&gsi, fd->loop.v,
6713 fold_convert (type, fd->loop.n1));
6714 if (fd->collapse > 1)
6715 for (i = 0; i < fd->collapse; i++)
6717 tree itype = TREE_TYPE (fd->loops[i].v);
6718 if (POINTER_TYPE_P (itype))
6719 itype = signed_type_for (itype);
6720 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
6721 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6725 /* Remove the GIMPLE_OMP_FOR statement. */
6726 gsi_remove (&gsi, true);
6728 if (!broken_loop)
6730 /* Code to control the increment goes in the CONT_BB. */
6731 gsi = gsi_last_bb (cont_bb);
6732 stmt = gsi_stmt (gsi);
6733 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6735 if (POINTER_TYPE_P (type))
6736 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
6737 else
6738 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
6739 expand_omp_build_assign (&gsi, fd->loop.v, t);
6741 if (fd->collapse > 1)
6743 i = fd->collapse - 1;
6744 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
6746 t = fold_convert (sizetype, fd->loops[i].step);
6747 t = fold_build_pointer_plus (fd->loops[i].v, t);
6749 else
6751 t = fold_convert (TREE_TYPE (fd->loops[i].v),
6752 fd->loops[i].step);
6753 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
6754 fd->loops[i].v, t);
6756 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6758 for (i = fd->collapse - 1; i > 0; i--)
6760 tree itype = TREE_TYPE (fd->loops[i].v);
6761 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
6762 if (POINTER_TYPE_P (itype2))
6763 itype2 = signed_type_for (itype2);
6764 t = build3 (COND_EXPR, itype2,
6765 build2 (fd->loops[i].cond_code, boolean_type_node,
6766 fd->loops[i].v,
6767 fold_convert (itype, fd->loops[i].n2)),
6768 build_int_cst (itype2, 0),
6769 fold_convert (itype2, fd->loops[i - 1].step));
6770 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
6771 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
6772 else
6773 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
6774 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
6776 t = build3 (COND_EXPR, itype,
6777 build2 (fd->loops[i].cond_code, boolean_type_node,
6778 fd->loops[i].v,
6779 fold_convert (itype, fd->loops[i].n2)),
6780 fd->loops[i].v,
6781 fold_convert (itype, fd->loops[i].n1));
6782 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6786 /* Remove GIMPLE_OMP_CONTINUE. */
6787 gsi_remove (&gsi, true);
6790 /* Emit the condition in L1_BB. */
6791 gsi = gsi_start_bb (l1_bb);
6793 t = fold_convert (type, n2);
6794 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6795 false, GSI_CONTINUE_LINKING);
6796 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
6797 stmt = gimple_build_cond_empty (t);
6798 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6799 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
6800 NULL, NULL)
6801 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
6802 NULL, NULL))
6804 gsi = gsi_for_stmt (stmt);
6805 gimple_regimplify_operands (stmt, &gsi);
6808 /* Remove GIMPLE_OMP_RETURN. */
6809 gsi = gsi_last_bb (exit_bb);
6810 gsi_remove (&gsi, true);
6812 /* Connect the new blocks. */
6813 remove_edge (FALLTHRU_EDGE (entry_bb));
6815 if (!broken_loop)
6817 remove_edge (BRANCH_EDGE (entry_bb));
6818 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6820 e = BRANCH_EDGE (l1_bb);
6821 ne = FALLTHRU_EDGE (l1_bb);
6822 e->flags = EDGE_TRUE_VALUE;
6824 else
6826 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6828 ne = single_succ_edge (l1_bb);
6829 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6832 ne->flags = EDGE_FALSE_VALUE;
6833 e->probability = REG_BR_PROB_BASE * 7 / 8;
6834 ne->probability = REG_BR_PROB_BASE / 8;
6836 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6837 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6838 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6840 if (!broken_loop)
6842 struct loop *loop = alloc_loop ();
6843 loop->header = l1_bb;
6844 loop->latch = cont_bb;
6845 add_loop (loop, l1_bb->loop_father);
6846 if (safelen == NULL_TREE)
6847 loop->safelen = INT_MAX;
6848 else
6850 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
6851 if (!tree_fits_uhwi_p (safelen)
6852 || tree_to_uhwi (safelen) > INT_MAX)
6853 loop->safelen = INT_MAX;
6854 else
6855 loop->safelen = tree_to_uhwi (safelen);
6856 if (loop->safelen == 1)
6857 loop->safelen = 0;
6859 if (simduid)
6861 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
6862 cfun->has_simduid_loops = true;
6864 /* Unless -fno-tree-loop-vectorize was given, hint that we want to
6865 vectorize the loop. */
6866 if ((flag_tree_loop_vectorize
6867 || (!global_options_set.x_flag_tree_loop_vectorize
6868 && !global_options_set.x_flag_tree_vectorize))
6869 && flag_tree_loop_optimize
6870 && loop->safelen > 1)
6872 loop->force_vectorize = true;
6873 cfun->has_force_vectorize_loops = true;
6879 /* Expand the OpenMP loop defined by REGION. */
6881 static void
6882 expand_omp_for (struct omp_region *region, gimple inner_stmt)
6884 struct omp_for_data fd;
6885 struct omp_for_data_loop *loops;
6887 loops
6888 = (struct omp_for_data_loop *)
6889 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
6890 * sizeof (struct omp_for_data_loop));
6891 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
6892 region->sched_kind = fd.sched_kind;
6894 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
6895 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6896 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6897 if (region->cont)
6899 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
6900 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6901 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6903 else
6904 /* If there isn't a continue then this is a degenerate case where
6905 the introduction of abnormal edges during lowering will prevent
6906 original loops from being detected. Fix that up. */
6907 loops_state_set (LOOPS_NEED_FIXUP);
6909 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_KIND_SIMD)
6910 expand_omp_simd (region, &fd);
6911 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
6912 && !fd.have_ordered)
6914 if (fd.chunk_size == NULL)
6915 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
6916 else
6917 expand_omp_for_static_chunk (region, &fd, inner_stmt);
6919 else
6921 int fn_index, start_ix, next_ix;
6923 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
6924 == GF_OMP_FOR_KIND_FOR);
6925 if (fd.chunk_size == NULL
6926 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
6927 fd.chunk_size = integer_zero_node;
6928 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
6929 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
6930 ? 3 : fd.sched_kind;
6931 fn_index += fd.have_ordered * 4;
6932 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
6933 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
6934 if (fd.iter_type == long_long_unsigned_type_node)
6936 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
6937 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
6938 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
6939 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
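/* An illustrative resolution (assuming the usual omp-builtins.def
   ordering of the GOMP_LOOP_*_START entry points): schedule(dynamic)
   yields fn_index = 1 and an ordered clause adds 4, so start_ix
   resolves to BUILT_IN_GOMP_LOOP_ORDERED_DYNAMIC_START, with next_ix
   the matching *_NEXT builtin.  */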
6941 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
6942 (enum built_in_function) next_ix, inner_stmt);
6945 if (gimple_in_ssa_p (cfun))
6946 update_ssa (TODO_update_ssa_only_virtuals);
6950 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
6952 v = GOMP_sections_start (n);
6953 L0:
6954 switch (v)
6955 {
6956 case 0:
6957 goto L2;
6958 case 1:
6959 section 1;
6960 goto L1;
6961 case 2:
6962 ...
6963 case n:
6964 ...
6965 default:
6966 abort ();
6967 }
6968 L1:
6969 v = GOMP_sections_next ();
6970 goto L0;
6971 L2:
6972 reduction;
6974 If this is a combined parallel sections, replace the call to
6975 GOMP_sections_start with a call to GOMP_sections_next. */
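/* For illustration (user code invented): a directive with two sections,

   #pragma omp sections
   {
   #pragma omp section
   foo ();
   #pragma omp section
   bar ();
   }

   produces a switch in which case 1 runs foo () and case 2 runs
   bar (); each thread keeps calling GOMP_sections_next () until it
   returns 0.  */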
6977 static void
6978 expand_omp_sections (struct omp_region *region)
6980 tree t, u, vin = NULL, vmain, vnext, l2;
6981 unsigned len;
6982 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
6983 gimple_stmt_iterator si, switch_si;
6984 gimple sections_stmt, stmt, cont;
6985 edge_iterator ei;
6986 edge e;
6987 struct omp_region *inner;
6988 unsigned i, casei;
6989 bool exit_reachable = region->cont != NULL;
6991 gcc_assert (region->exit != NULL);
6992 entry_bb = region->entry;
6993 l0_bb = single_succ (entry_bb);
6994 l1_bb = region->cont;
6995 l2_bb = region->exit;
6996 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
6997 l2 = gimple_block_label (l2_bb);
6998 else
7000 /* This can happen if there are reductions. */
7001 len = EDGE_COUNT (l0_bb->succs);
7002 gcc_assert (len > 0);
7003 e = EDGE_SUCC (l0_bb, len - 1);
7004 si = gsi_last_bb (e->dest);
7005 l2 = NULL_TREE;
7006 if (gsi_end_p (si)
7007 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7008 l2 = gimple_block_label (e->dest);
7009 else
7010 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7012 si = gsi_last_bb (e->dest);
7013 if (gsi_end_p (si)
7014 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7016 l2 = gimple_block_label (e->dest);
7017 break;
7021 if (exit_reachable)
7022 default_bb = create_empty_bb (l1_bb->prev_bb);
7023 else
7024 default_bb = create_empty_bb (l0_bb);
7026 /* We will build a switch() with enough cases for all the
7027 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of the work,
7028 and a default case to abort if something goes wrong. */
7029 len = EDGE_COUNT (l0_bb->succs);
7031 /* Use vec::quick_push on label_vec throughout, since we know the size
7032 in advance. */
7033 auto_vec<tree> label_vec (len);
7035 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7036 GIMPLE_OMP_SECTIONS statement. */
7037 si = gsi_last_bb (entry_bb);
7038 sections_stmt = gsi_stmt (si);
7039 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7040 vin = gimple_omp_sections_control (sections_stmt);
7041 if (!is_combined_parallel (region))
7043 /* If we are not inside a combined parallel+sections region,
7044 call GOMP_sections_start. */
7045 t = build_int_cst (unsigned_type_node, len - 1);
7046 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7047 stmt = gimple_build_call (u, 1, t);
7049 else
7051 /* Otherwise, call GOMP_sections_next. */
7052 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7053 stmt = gimple_build_call (u, 0);
7055 gimple_call_set_lhs (stmt, vin);
7056 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7057 gsi_remove (&si, true);
7059 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7060 L0_BB. */
7061 switch_si = gsi_last_bb (l0_bb);
7062 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7063 if (exit_reachable)
7065 cont = last_stmt (l1_bb);
7066 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7067 vmain = gimple_omp_continue_control_use (cont);
7068 vnext = gimple_omp_continue_control_def (cont);
7070 else
7072 vmain = vin;
7073 vnext = NULL_TREE;
7076 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7077 label_vec.quick_push (t);
7078 i = 1;
7080 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7081 for (inner = region->inner, casei = 1;
7082 inner;
7083 inner = inner->next, i++, casei++)
7085 basic_block s_entry_bb, s_exit_bb;
7087 /* Skip optional reduction region. */
7088 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7090 --i;
7091 --casei;
7092 continue;
7095 s_entry_bb = inner->entry;
7096 s_exit_bb = inner->exit;
7098 t = gimple_block_label (s_entry_bb);
7099 u = build_int_cst (unsigned_type_node, casei);
7100 u = build_case_label (u, NULL, t);
7101 label_vec.quick_push (u);
7103 si = gsi_last_bb (s_entry_bb);
7104 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7105 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7106 gsi_remove (&si, true);
7107 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7109 if (s_exit_bb == NULL)
7110 continue;
7112 si = gsi_last_bb (s_exit_bb);
7113 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7114 gsi_remove (&si, true);
7116 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7119 /* Error handling code goes in DEFAULT_BB. */
7120 t = gimple_block_label (default_bb);
7121 u = build_case_label (NULL, NULL, t);
7122 make_edge (l0_bb, default_bb, 0);
7123 if (current_loops)
7124 add_bb_to_loop (default_bb, current_loops->tree_root);
7126 stmt = gimple_build_switch (vmain, u, label_vec);
7127 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7128 gsi_remove (&switch_si, true);
7130 si = gsi_start_bb (default_bb);
7131 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7132 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7134 if (exit_reachable)
7136 tree bfn_decl;
7138 /* Code to get the next section goes in L1_BB. */
7139 si = gsi_last_bb (l1_bb);
7140 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7142 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7143 stmt = gimple_build_call (bfn_decl, 0);
7144 gimple_call_set_lhs (stmt, vnext);
7145 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7146 gsi_remove (&si, true);
7148 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7151 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7152 si = gsi_last_bb (l2_bb);
7153 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7154 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7155 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7156 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7157 else
7158 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7159 stmt = gimple_build_call (t, 0);
7160 if (gimple_omp_return_lhs (gsi_stmt (si)))
7161 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7162 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7163 gsi_remove (&si, true);
7165 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7169 /* Expand code for an OpenMP single directive. We've already expanded
7170 much of the code; here we simply place the GOMP_barrier call. */
7172 static void
7173 expand_omp_single (struct omp_region *region)
7175 basic_block entry_bb, exit_bb;
7176 gimple_stmt_iterator si;
7178 entry_bb = region->entry;
7179 exit_bb = region->exit;
7181 si = gsi_last_bb (entry_bb);
7182 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7183 gsi_remove (&si, true);
7184 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7186 si = gsi_last_bb (exit_bb);
7187 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7189 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7190 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7192 gsi_remove (&si, true);
7193 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7197 /* Generic expansion for OpenMP synchronization directives: master,
7198 ordered and critical. All we need to do here is remove the entry
7199 and exit markers for REGION. */
7201 static void
7202 expand_omp_synch (struct omp_region *region)
7204 basic_block entry_bb, exit_bb;
7205 gimple_stmt_iterator si;
7207 entry_bb = region->entry;
7208 exit_bb = region->exit;
7210 si = gsi_last_bb (entry_bb);
7211 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7212 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7213 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7214 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7215 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7216 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7217 gsi_remove (&si, true);
7218 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7220 if (exit_bb)
7222 si = gsi_last_bb (exit_bb);
7223 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7224 gsi_remove (&si, true);
7225 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7229 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7230 operation as a normal volatile load. */
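/* For example (invented case, a 4-byte int, so INDEX is 2): a
   "#pragma omp atomic read" of v = *addr is emitted roughly as
   v = __atomic_load_4 (addr, MEMMODEL_RELAXED), with MEMMODEL_SEQ_CST
   instead when the seq_cst clause is present.  */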
7232 static bool
7233 expand_omp_atomic_load (basic_block load_bb, tree addr,
7234 tree loaded_val, int index)
7236 enum built_in_function tmpbase;
7237 gimple_stmt_iterator gsi;
7238 basic_block store_bb;
7239 location_t loc;
7240 gimple stmt;
7241 tree decl, call, type, itype;
7243 gsi = gsi_last_bb (load_bb);
7244 stmt = gsi_stmt (gsi);
7245 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7246 loc = gimple_location (stmt);
7248 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7249 is smaller than word size, then expand_atomic_load assumes that the load
7250 is atomic. We could avoid the builtin entirely in this case. */
7252 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7253 decl = builtin_decl_explicit (tmpbase);
7254 if (decl == NULL_TREE)
7255 return false;
7257 type = TREE_TYPE (loaded_val);
7258 itype = TREE_TYPE (TREE_TYPE (decl));
7260 call = build_call_expr_loc (loc, decl, 2, addr,
7261 build_int_cst (NULL,
7262 gimple_omp_atomic_seq_cst_p (stmt)
7263 ? MEMMODEL_SEQ_CST
7264 : MEMMODEL_RELAXED));
7265 if (!useless_type_conversion_p (type, itype))
7266 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7267 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7269 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7270 gsi_remove (&gsi, true);
7272 store_bb = single_succ (load_bb);
7273 gsi = gsi_last_bb (store_bb);
7274 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7275 gsi_remove (&gsi, true);
7277 if (gimple_in_ssa_p (cfun))
7278 update_ssa (TODO_update_ssa_no_phi);
7280 return true;
7283 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7284 operation as a normal volatile store. */
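/* For example (invented case, a 4-byte int): a plain "#pragma omp
   atomic write" of *addr = v is emitted roughly as
   __atomic_store_4 (addr, v, MEMMODEL_RELAXED); a capture form that
   still needs the previous value instead becomes an
   __atomic_exchange_4 call whose result is assigned to loaded_val.  */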
7286 static bool
7287 expand_omp_atomic_store (basic_block load_bb, tree addr,
7288 tree loaded_val, tree stored_val, int index)
7290 enum built_in_function tmpbase;
7291 gimple_stmt_iterator gsi;
7292 basic_block store_bb = single_succ (load_bb);
7293 location_t loc;
7294 gimple stmt;
7295 tree decl, call, type, itype;
7296 enum machine_mode imode;
7297 bool exchange;
7299 gsi = gsi_last_bb (load_bb);
7300 stmt = gsi_stmt (gsi);
7301 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7303 /* If the load value is needed, then this isn't a store but an exchange. */
7304 exchange = gimple_omp_atomic_need_value_p (stmt);
7306 gsi = gsi_last_bb (store_bb);
7307 stmt = gsi_stmt (gsi);
7308 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7309 loc = gimple_location (stmt);
7311 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7312 is smaller than word size, then expand_atomic_store assumes that the store
7313 is atomic. We could avoid the builtin entirely in this case. */
7315 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7316 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7317 decl = builtin_decl_explicit (tmpbase);
7318 if (decl == NULL_TREE)
7319 return false;
7321 type = TREE_TYPE (stored_val);
7323 /* Dig out the type of the function's second argument. */
7324 itype = TREE_TYPE (decl);
7325 itype = TYPE_ARG_TYPES (itype);
7326 itype = TREE_CHAIN (itype);
7327 itype = TREE_VALUE (itype);
7328 imode = TYPE_MODE (itype);
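/* E.g. for __atomic_store_4 the second argument is the 4-byte value
   operand, so ITYPE would typically be unsigned int and IMODE SImode
   (illustrative; the exact type and mode are target-dependent).  */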
7330 if (exchange && !can_atomic_exchange_p (imode, true))
7331 return false;
7333 if (!useless_type_conversion_p (itype, type))
7334 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7335 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7336 build_int_cst (NULL,
7337 gimple_omp_atomic_seq_cst_p (stmt)
7338 ? MEMMODEL_SEQ_CST
7339 : MEMMODEL_RELAXED));
7340 if (exchange)
7342 if (!useless_type_conversion_p (type, itype))
7343 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7344 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7347 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7348 gsi_remove (&gsi, true);
7350 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7351 gsi = gsi_last_bb (load_bb);
7352 gsi_remove (&gsi, true);
7354 if (gimple_in_ssa_p (cfun))
7355 update_ssa (TODO_update_ssa_no_phi);
7357 return true;
7360 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7361 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7362 size of the data type, and thus usable to find the index of the builtin
7363 decl. Returns false if the expression is not of the proper form. */
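/* For example (user code invented): "#pragma omp atomic" applied to
   x += 5 on a 4-byte int matches the PLUS_EXPR case below and is
   emitted roughly as __atomic_fetch_add_4 (&x, 5, MEMMODEL_RELAXED);
   the add-fetch variant is chosen instead when the updated value is
   needed.  */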
7365 static bool
7366 expand_omp_atomic_fetch_op (basic_block load_bb,
7367 tree addr, tree loaded_val,
7368 tree stored_val, int index)
7370 enum built_in_function oldbase, newbase, tmpbase;
7371 tree decl, itype, call;
7372 tree lhs, rhs;
7373 basic_block store_bb = single_succ (load_bb);
7374 gimple_stmt_iterator gsi;
7375 gimple stmt;
7376 location_t loc;
7377 enum tree_code code;
7378 bool need_old, need_new;
7379 enum machine_mode imode;
7380 bool seq_cst;
7382 /* We expect to find the following sequences:
7384 load_bb:
7385 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7387 store_bb:
7388 val = tmp OP something; (or: something OP tmp)
7389 GIMPLE_OMP_ATOMIC_STORE (val)
7391 ???FIXME: Allow a more flexible sequence.
7392 Perhaps use data flow to pick the statements.
7394 */
7396 gsi = gsi_after_labels (store_bb);
7397 stmt = gsi_stmt (gsi);
7398 loc = gimple_location (stmt);
7399 if (!is_gimple_assign (stmt))
7400 return false;
7401 gsi_next (&gsi);
7402 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7403 return false;
7404 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7405 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7406 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7407 gcc_checking_assert (!need_old || !need_new);
7409 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7410 return false;
7412 /* Check for one of the supported fetch-op operations. */
7413 code = gimple_assign_rhs_code (stmt);
7414 switch (code)
7416 case PLUS_EXPR:
7417 case POINTER_PLUS_EXPR:
7418 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7419 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7420 break;
7421 case MINUS_EXPR:
7422 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7423 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7424 break;
7425 case BIT_AND_EXPR:
7426 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7427 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7428 break;
7429 case BIT_IOR_EXPR:
7430 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7431 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7432 break;
7433 case BIT_XOR_EXPR:
7434 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7435 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7436 break;
7437 default:
7438 return false;
7441 /* Make sure the expression is of the proper form. */
7442 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7443 rhs = gimple_assign_rhs2 (stmt);
7444 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7445 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7446 rhs = gimple_assign_rhs1 (stmt);
7447 else
7448 return false;
7450 tmpbase = ((enum built_in_function)
7451 ((need_new ? newbase : oldbase) + index + 1));
7452 decl = builtin_decl_explicit (tmpbase);
7453 if (decl == NULL_TREE)
7454 return false;
7455 itype = TREE_TYPE (TREE_TYPE (decl));
7456 imode = TYPE_MODE (itype);
7458 /* We could test all of the various optabs involved, but the fact of the
7459 matter is that (with the exception of i486 vs i586 and xadd) all targets
7460 that support any atomic operation optab also implement compare-and-swap.
7461 Let optabs.c take care of expanding any compare-and-swap loop. */
7462 if (!can_compare_and_swap_p (imode, true))
7463 return false;
7465 gsi = gsi_last_bb (load_bb);
7466 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7468 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7469 It only requires that the operation happen atomically. Thus we can
7470 use the RELAXED memory model. */
7471 call = build_call_expr_loc (loc, decl, 3, addr,
7472 fold_convert_loc (loc, itype, rhs),
7473 build_int_cst (NULL,
7474 seq_cst ? MEMMODEL_SEQ_CST
7475 : MEMMODEL_RELAXED));
7477 if (need_old || need_new)
7479 lhs = need_old ? loaded_val : stored_val;
7480 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7481 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7483 else
7484 call = fold_convert_loc (loc, void_type_node, call);
7485 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7486 gsi_remove (&gsi, true);
7488 gsi = gsi_last_bb (store_bb);
7489 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7490 gsi_remove (&gsi, true);
7491 gsi = gsi_last_bb (store_bb);
7492 gsi_remove (&gsi, true);
7494 if (gimple_in_ssa_p (cfun))
7495 update_ssa (TODO_update_ssa_no_phi);
7497 return true;
7500 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7502 oldval = *addr;
7503 repeat:
7504 newval = rhs; // with oldval replacing *addr in rhs
7505 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
7506 if (oldval != newval)
7507 goto repeat;
7509 INDEX is log2 of the size of the data type, and thus usable to find the
7510 index of the builtin decl. */
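/* For illustration (invented example): a "#pragma omp atomic" update
   of a 4-byte float, f += 1.0f, has no fetch-op builtin, so the loop
   above is built instead: the float value is VIEW_CONVERT_EXPRed to a
   4-byte integer and __sync_val_compare_and_swap_4 serves as the
   compare-and-swap primitive.  */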
7512 static bool
7513 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7514 tree addr, tree loaded_val, tree stored_val,
7515 int index)
7517 tree loadedi, storedi, initial, new_storedi, old_vali;
7518 tree type, itype, cmpxchg, iaddr;
7519 gimple_stmt_iterator si;
7520 basic_block loop_header = single_succ (load_bb);
7521 gimple phi, stmt;
7522 edge e;
7523 enum built_in_function fncode;
7525 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7526 order to use the RELAXED memory model effectively. */
7527 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7528 + index + 1);
7529 cmpxchg = builtin_decl_explicit (fncode);
7530 if (cmpxchg == NULL_TREE)
7531 return false;
7532 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7533 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7535 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7536 return false;
7538 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7539 si = gsi_last_bb (load_bb);
7540 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7542 /* For floating-point values, we'll need to view-convert them to integers
7543 so that we can perform the atomic compare and swap. Simplify the
7544 following code by always setting up the "i"ntegral variables. */
7545 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7547 tree iaddr_val;
7549 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7550 true), NULL);
7551 iaddr_val
7552 = force_gimple_operand_gsi (&si,
7553 fold_convert (TREE_TYPE (iaddr), addr),
7554 false, NULL_TREE, true, GSI_SAME_STMT);
7555 stmt = gimple_build_assign (iaddr, iaddr_val);
7556 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7557 loadedi = create_tmp_var (itype, NULL);
7558 if (gimple_in_ssa_p (cfun))
7559 loadedi = make_ssa_name (loadedi, NULL);
7561 else
7563 iaddr = addr;
7564 loadedi = loaded_val;
7567 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7568 tree loaddecl = builtin_decl_explicit (fncode);
7569 if (loaddecl)
7570 initial
7571 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
7572 build_call_expr (loaddecl, 2, iaddr,
7573 build_int_cst (NULL_TREE,
7574 MEMMODEL_RELAXED)));
7575 else
7576 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
7577 build_int_cst (TREE_TYPE (iaddr), 0));
7579 initial
7580 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
7581 GSI_SAME_STMT);
7583 /* Move the value to the LOADEDI temporary. */
7584 if (gimple_in_ssa_p (cfun))
7586 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
7587 phi = create_phi_node (loadedi, loop_header);
7588 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
7589 initial);
7591 else
7592 gsi_insert_before (&si,
7593 gimple_build_assign (loadedi, initial),
7594 GSI_SAME_STMT);
7595 if (loadedi != loaded_val)
7597 gimple_stmt_iterator gsi2;
7598 tree x;
7600 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
7601 gsi2 = gsi_start_bb (loop_header);
7602 if (gimple_in_ssa_p (cfun))
7604 gimple stmt;
7605 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7606 true, GSI_SAME_STMT);
7607 stmt = gimple_build_assign (loaded_val, x);
7608 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
7610 else
7612 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
7613 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7614 true, GSI_SAME_STMT);
7617 gsi_remove (&si, true);
7619 si = gsi_last_bb (store_bb);
7620 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7622 if (iaddr == addr)
7623 storedi = stored_val;
7624 else
7625 storedi =
7626 force_gimple_operand_gsi (&si,
7627 build1 (VIEW_CONVERT_EXPR, itype,
7628 stored_val), true, NULL_TREE, true,
7629 GSI_SAME_STMT);
7631 /* Build the compare&swap statement. */
7632 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
7633 new_storedi = force_gimple_operand_gsi (&si,
7634 fold_convert (TREE_TYPE (loadedi),
7635 new_storedi),
7636 true, NULL_TREE,
7637 true, GSI_SAME_STMT);
7639 if (gimple_in_ssa_p (cfun))
7640 old_vali = loadedi;
7641 else
7643 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
7644 stmt = gimple_build_assign (old_vali, loadedi);
7645 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7647 stmt = gimple_build_assign (loadedi, new_storedi);
7648 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7651 /* Note that we always perform the comparison as an integer, even for
7652 floating point. This allows the atomic operation to properly
7653 succeed even with NaNs and -0.0. */
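/* (With a floating-point comparison, NaN != NaN would keep the loop
   spinning even after a successful swap, and 0.0 == -0.0 could end
   it although the hardware compare-and-swap, which compares bit
   patterns, had failed.)  */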
7654 stmt = gimple_build_cond_empty
7655 (build2 (NE_EXPR, boolean_type_node,
7656 new_storedi, old_vali));
7657 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7659 /* Update cfg. */
7660 e = single_succ_edge (store_bb);
7661 e->flags &= ~EDGE_FALLTHRU;
7662 e->flags |= EDGE_FALSE_VALUE;
7664 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
7666 /* Copy the new value to loadedi (we already did that before the condition
7667 if we are not in SSA). */
7668 if (gimple_in_ssa_p (cfun))
7670 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
7671 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
7674 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
7675 gsi_remove (&si, true);
7677 struct loop *loop = alloc_loop ();
7678 loop->header = loop_header;
7679 loop->latch = store_bb;
7680 add_loop (loop, loop_header->loop_father);
7682 if (gimple_in_ssa_p (cfun))
7683 update_ssa (TODO_update_ssa_no_phi);
7685 return true;
7688 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7690 GOMP_atomic_start ();
7691 *addr = rhs;
7692 GOMP_atomic_end ();
7694 The result is not globally atomic, but works so long as all parallel
7695 references are within #pragma omp atomic directives. According to
7696 responses received from omp@openmp.org, this appears to be within spec.
7697 That makes sense, since that's how several other compilers handle
7698 this situation as well.
7699 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
7700 expanding. STORED_VAL is the operand of the matching
7701 GIMPLE_OMP_ATOMIC_STORE.
7703 We replace
7704 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
7705 loaded_val = *addr;
7707 and replace
7708 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
7709 *addr = stored_val;
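For example, on 32-bit x86 a 12-byte long double is not a
power-of-two size, so exact_log2 fails in expand_omp_atomic and an
atomic update of such a variable falls through to this mutex-based
expansion.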
7712 static bool
7713 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
7714 tree addr, tree loaded_val, tree stored_val)
7716 gimple_stmt_iterator si;
7717 gimple stmt;
7718 tree t;
7720 si = gsi_last_bb (load_bb);
7721 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7723 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
7724 t = build_call_expr (t, 0);
7725 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7727 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
7728 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7729 gsi_remove (&si, true);
7731 si = gsi_last_bb (store_bb);
7732 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7734 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
7735 stored_val);
7736 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7738 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
7739 t = build_call_expr (t, 0);
7740 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7741 gsi_remove (&si, true);
7743 if (gimple_in_ssa_p (cfun))
7744 update_ssa (TODO_update_ssa_no_phi);
7745 return true;
7748 /* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
7749 using expand_omp_atomic_fetch_op.  If that fails, we try to
7750 call expand_omp_atomic_pipeline, and if that fails too, the
7751 ultimate fallback is wrapping the operation in a mutex
7752 (expand_omp_atomic_mutex). REGION is the atomic region built
7753 by build_omp_regions_1(). */
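/* For instance, for a 4-byte type TYPE_SIZE_UNIT is 4 and INDEX below
   becomes exact_log2 (4) == 2; adding INDEX + 1 to a _N builtin base
   (e.g. BUILT_IN_ATOMIC_FETCH_ADD_N) in the helpers above selects the
   4-byte variant of that builtin.  */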
7755 static void
7756 expand_omp_atomic (struct omp_region *region)
7758 basic_block load_bb = region->entry, store_bb = region->exit;
7759 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
7760 tree loaded_val = gimple_omp_atomic_load_lhs (load);
7761 tree addr = gimple_omp_atomic_load_rhs (load);
7762 tree stored_val = gimple_omp_atomic_store_val (store);
7763 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7764 HOST_WIDE_INT index;
7766 /* Make sure the type is one of the supported sizes. */
7767 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
7768 index = exact_log2 (index);
7769 if (index >= 0 && index <= 4)
7771 unsigned int align = TYPE_ALIGN_UNIT (type);
7773 /* __sync builtins require strict data alignment. */
7774 if (exact_log2 (align) >= index)
7776 /* Atomic load. */
7777 if (loaded_val == stored_val
7778 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7779 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7780 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7781 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
7782 return;
7784 /* Atomic store. */
7785 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7786 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7787 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7788 && store_bb == single_succ (load_bb)
7789 && first_stmt (store_bb) == store
7790 && expand_omp_atomic_store (load_bb, addr, loaded_val,
7791 stored_val, index))
7792 return;
7794 /* When possible, use specialized atomic update functions. */
7795 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
7796 && store_bb == single_succ (load_bb)
7797 && expand_omp_atomic_fetch_op (load_bb, addr,
7798 loaded_val, stored_val, index))
7799 return;
7801 /* If we don't have specialized __sync builtins, try to implement
7802 it as a compare-and-swap loop. */
7803 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
7804 loaded_val, stored_val, index))
7805 return;
7809 /* The ultimate fallback is wrapping the operation in a mutex. */
7810 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
7814 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
7816 static void
7817 expand_omp_target (struct omp_region *region)
7819 basic_block entry_bb, exit_bb, new_bb;
7820 struct function *child_cfun = NULL;
7821 tree child_fn = NULL_TREE, block, t;
7822 gimple_stmt_iterator gsi;
7823 gimple entry_stmt, stmt;
7824 edge e;
7826 entry_stmt = last_stmt (region->entry);
7827 new_bb = region->entry;
7828 int kind = gimple_omp_target_kind (entry_stmt);
7829 if (kind == GF_OMP_TARGET_KIND_REGION)
7831 child_fn = gimple_omp_target_child_fn (entry_stmt);
7832 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7835 entry_bb = region->entry;
7836 exit_bb = region->exit;
7838 if (kind == GF_OMP_TARGET_KIND_REGION)
7840 unsigned srcidx, dstidx, num;
7842 /* If the target region needs data sent from the parent
7843 function, then the very first statement (except possible
7844 tree profile counter updates) of the target body
7845 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
7846 &.OMP_DATA_O is passed as an argument to the child function,
7847 we need to replace it with the argument as seen by the child
7848 function.
7850 In most cases, this will end up being the identity assignment
7851 .OMP_DATA_I = .OMP_DATA_I.  However, if the target body had
7852 a function call that has been inlined, the original PARM_DECL
7853 .OMP_DATA_I may have been converted into a different local
7854 variable.  In that case, we need to keep the assignment. */
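/* Sketch of the identity case handled below:
     .OMP_DATA_I = &.OMP_DATA_O;
   Once this TGTCOPY_STMT is found and its LHS is verified to be the
   child function's first argument, the assignment is simply
   removed.  */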
7855 if (gimple_omp_target_data_arg (entry_stmt))
7857 basic_block entry_succ_bb = single_succ (entry_bb);
7858 gimple_stmt_iterator gsi;
7859 tree arg;
7860 gimple tgtcopy_stmt = NULL;
7861 tree sender
7862 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
7864 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
7866 gcc_assert (!gsi_end_p (gsi));
7867 stmt = gsi_stmt (gsi);
7868 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7869 continue;
7871 if (gimple_num_ops (stmt) == 2)
7873 tree arg = gimple_assign_rhs1 (stmt);
7875 /* We're ignoring the subcode because we're
7876 effectively doing a STRIP_NOPS. */
7878 if (TREE_CODE (arg) == ADDR_EXPR
7879 && TREE_OPERAND (arg, 0) == sender)
7881 tgtcopy_stmt = stmt;
7882 break;
7887 gcc_assert (tgtcopy_stmt != NULL);
7888 arg = DECL_ARGUMENTS (child_fn);
7890 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
7891 gsi_remove (&gsi, true);
7894 /* Declare local variables needed in CHILD_CFUN. */
7895 block = DECL_INITIAL (child_fn);
7896 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
7897 /* The gimplifier could record temporaries in the target block
7898 rather than in the containing function's local_decls chain,
7899 which would mean cgraph missed finalizing them.  Do it now. */
7900 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
7901 if (TREE_CODE (t) == VAR_DECL
7902 && TREE_STATIC (t)
7903 && !DECL_EXTERNAL (t))
7904 varpool_finalize_decl (t);
7905 DECL_SAVED_TREE (child_fn) = NULL;
7906 /* We'll create a CFG for child_fn, so no gimple body is needed. */
7907 gimple_set_body (child_fn, NULL);
7908 TREE_USED (block) = 1;
7910 /* Reset DECL_CONTEXT on function arguments. */
7911 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7912 DECL_CONTEXT (t) = child_fn;
7914 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
7915 so that it can be moved to the child function. */
7916 gsi = gsi_last_bb (entry_bb);
7917 stmt = gsi_stmt (gsi);
7918 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
7919 && gimple_omp_target_kind (stmt)
7920 == GF_OMP_TARGET_KIND_REGION);
7921 gsi_remove (&gsi, true);
7922 e = split_block (entry_bb, stmt);
7923 entry_bb = e->dest;
7924 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7926 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
7927 if (exit_bb)
7929 gsi = gsi_last_bb (exit_bb);
7930 gcc_assert (!gsi_end_p (gsi)
7931 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7932 stmt = gimple_build_return (NULL);
7933 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7934 gsi_remove (&gsi, true);
7937 /* Move the target region into CHILD_CFUN. */
7939 block = gimple_block (entry_stmt);
7941 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
7942 if (exit_bb)
7943 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
7944 /* When the OMP expansion process cannot guarantee an up-to-date
7945 loop tree, arrange for the child function to fix up loops. */
7946 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7947 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
7949 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
7950 num = vec_safe_length (child_cfun->local_decls);
7951 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
7953 t = (*child_cfun->local_decls)[srcidx];
7954 if (DECL_CONTEXT (t) == cfun->decl)
7955 continue;
7956 if (srcidx != dstidx)
7957 (*child_cfun->local_decls)[dstidx] = t;
7958 dstidx++;
7960 if (dstidx != num)
7961 vec_safe_truncate (child_cfun->local_decls, dstidx);
7963 /* Inform the callgraph about the new function. */
7964 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
7965 cgraph_add_new_function (child_fn, true);
7967 /* Fix the callgraph edges for child_cfun. Those for cfun will be
7968 fixed in a following pass. */
7969 push_cfun (child_cfun);
7970 rebuild_cgraph_edges ();
7972 /* Some EH regions might become dead, see PR34608. If
7973 pass_cleanup_cfg isn't the first pass to happen with the
7974 new child, these dead EH edges might cause problems.
7975 Clean them up now. */
7976 if (flag_exceptions)
7978 basic_block bb;
7979 bool changed = false;
7981 FOR_EACH_BB_FN (bb, cfun)
7982 changed |= gimple_purge_dead_eh_edges (bb);
7983 if (changed)
7984 cleanup_tree_cfg ();
7986 pop_cfun ();
7989 /* Emit a library call to launch the target region, or do data
7990 transfers. */
7991 tree t1, t2, t3, t4, device, cond, c, clauses;
7992 enum built_in_function start_ix;
7993 location_t clause_loc;
7995 clauses = gimple_omp_target_clauses (entry_stmt);
7997 if (kind == GF_OMP_TARGET_KIND_REGION)
7998 start_ix = BUILT_IN_GOMP_TARGET;
7999 else if (kind == GF_OMP_TARGET_KIND_DATA)
8000 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8001 else
8002 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8004 /* By default, the value of DEVICE is -1 (let runtime library choose)
8005 and there is no conditional. */
8006 cond = NULL_TREE;
8007 device = build_int_cst (integer_type_node, -1);
8009 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8010 if (c)
8011 cond = OMP_CLAUSE_IF_EXPR (c);
8013 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8014 if (c)
8016 device = OMP_CLAUSE_DEVICE_ID (c);
8017 clause_loc = OMP_CLAUSE_LOCATION (c);
8019 else
8020 clause_loc = gimple_location (entry_stmt);
8022 /* Ensure 'device' is of the correct type. */
8023 device = fold_convert_loc (clause_loc, integer_type_node, device);
8025 /* If we found the clause 'if (cond)', build
8026 (cond ? device : -2). */
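/* E.g., for
     #pragma omp target if (n > 100) device (2)
   the device argument becomes roughly (n > 100 ? 2 : -2), built
   below as an explicit diamond in the CFG with a temporary holding
   the result.  */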
8027 if (cond)
8029 cond = gimple_boolify (cond);
8031 basic_block cond_bb, then_bb, else_bb;
8032 edge e;
8033 tree tmp_var;
8035 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
8036 if (kind != GF_OMP_TARGET_KIND_REGION)
8038 gsi = gsi_last_bb (new_bb);
8039 gsi_prev (&gsi);
8040 e = split_block (new_bb, gsi_stmt (gsi));
8042 else
8043 e = split_block (new_bb, NULL);
8044 cond_bb = e->src;
8045 new_bb = e->dest;
8046 remove_edge (e);
8048 then_bb = create_empty_bb (cond_bb);
8049 else_bb = create_empty_bb (then_bb);
8050 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8051 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8053 stmt = gimple_build_cond_empty (cond);
8054 gsi = gsi_last_bb (cond_bb);
8055 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8057 gsi = gsi_start_bb (then_bb);
8058 stmt = gimple_build_assign (tmp_var, device);
8059 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8061 gsi = gsi_start_bb (else_bb);
8062 stmt = gimple_build_assign (tmp_var,
8063 build_int_cst (integer_type_node, -2));
8064 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8066 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8067 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8068 if (current_loops)
8070 add_bb_to_loop (then_bb, cond_bb->loop_father);
8071 add_bb_to_loop (else_bb, cond_bb->loop_father);
8073 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8074 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8076 device = tmp_var;
8079 gsi = gsi_last_bb (new_bb);
8080 t = gimple_omp_target_data_arg (entry_stmt);
8081 if (t == NULL)
8083 t1 = size_zero_node;
8084 t2 = build_zero_cst (ptr_type_node);
8085 t3 = t2;
8086 t4 = t2;
8088 else
8090 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8091 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8092 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8093 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8094 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8097 gimple g;
8098 /* FIXME: This will be the address of
8099 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8100 symbol, as soon as the linker plugin is able to create it for us. */
8101 tree openmp_target = build_zero_cst (ptr_type_node);
8102 if (kind == GF_OMP_TARGET_KIND_REGION)
8104 tree fnaddr = build_fold_addr_expr (child_fn);
8105 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8106 device, fnaddr, openmp_target, t1, t2, t3, t4);
8108 else
8109 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8110 device, openmp_target, t1, t2, t3, t4);
8111 gimple_set_location (g, gimple_location (entry_stmt));
8112 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8113 if (kind != GF_OMP_TARGET_KIND_REGION)
8115 g = gsi_stmt (gsi);
8116 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8117 gsi_remove (&gsi, true);
8119 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8121 gsi = gsi_last_bb (region->exit);
8122 g = gsi_stmt (gsi);
8123 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8124 gsi_remove (&gsi, true);
8129 /* Expand the parallel region tree rooted at REGION. Expansion
8130 proceeds in depth-first order. Innermost regions are expanded
8131 first. This way, parallel regions that require a new function to
8132 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8133 internal dependencies in their body. */
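/* For example, in
     #pragma omp parallel
       #pragma omp for
   the inner GIMPLE_OMP_FOR region is expanded before the enclosing
   GIMPLE_OMP_PARALLEL body is outlined into its child function.  */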
8135 static void
8136 expand_omp (struct omp_region *region)
8138 while (region)
8140 location_t saved_location;
8141 gimple inner_stmt = NULL;
8143 /* First, determine whether this is a combined parallel+workshare
8144 region. */
8145 if (region->type == GIMPLE_OMP_PARALLEL)
8146 determine_parallel_type (region);
8148 if (region->type == GIMPLE_OMP_FOR
8149 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8150 inner_stmt = last_stmt (region->inner->entry);
8152 if (region->inner)
8153 expand_omp (region->inner);
8155 saved_location = input_location;
8156 if (gimple_has_location (last_stmt (region->entry)))
8157 input_location = gimple_location (last_stmt (region->entry));
8159 switch (region->type)
8161 case GIMPLE_OMP_PARALLEL:
8162 case GIMPLE_OMP_TASK:
8163 expand_omp_taskreg (region);
8164 break;
8166 case GIMPLE_OMP_FOR:
8167 expand_omp_for (region, inner_stmt);
8168 break;
8170 case GIMPLE_OMP_SECTIONS:
8171 expand_omp_sections (region);
8172 break;
8174 case GIMPLE_OMP_SECTION:
8175 /* Individual omp sections are handled together with their
8176 parent GIMPLE_OMP_SECTIONS region. */
8177 break;
8179 case GIMPLE_OMP_SINGLE:
8180 expand_omp_single (region);
8181 break;
8183 case GIMPLE_OMP_MASTER:
8184 case GIMPLE_OMP_TASKGROUP:
8185 case GIMPLE_OMP_ORDERED:
8186 case GIMPLE_OMP_CRITICAL:
8187 case GIMPLE_OMP_TEAMS:
8188 expand_omp_synch (region);
8189 break;
8191 case GIMPLE_OMP_ATOMIC_LOAD:
8192 expand_omp_atomic (region);
8193 break;
8195 case GIMPLE_OMP_TARGET:
8196 expand_omp_target (region);
8197 break;
8199 default:
8200 gcc_unreachable ();
8203 input_location = saved_location;
8204 region = region->next;
8209 /* Helper for build_omp_regions. Scan the dominator tree starting at
8210 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8211 true, the function ends once a single tree is built (otherwise, a
8212 whole forest of OMP constructs may be built). */
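/* For example,
     #pragma omp parallel
     {
       #pragma omp for
       ...
     }
   yields a GIMPLE_OMP_PARALLEL region with the GIMPLE_OMP_FOR region
   as its only child; a second directive at the same nesting level
   would be linked to it as a peer.  */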
8214 static void
8215 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8216 bool single_tree)
8218 gimple_stmt_iterator gsi;
8219 gimple stmt;
8220 basic_block son;
8222 gsi = gsi_last_bb (bb);
8223 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8225 struct omp_region *region;
8226 enum gimple_code code;
8228 stmt = gsi_stmt (gsi);
8229 code = gimple_code (stmt);
8230 if (code == GIMPLE_OMP_RETURN)
8232 /* STMT is the return point out of region PARENT. Mark it
8233 as the exit point and make PARENT the immediately
8234 enclosing region. */
8235 gcc_assert (parent);
8236 region = parent;
8237 region->exit = bb;
8238 parent = parent->outer;
8240 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8242 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8243 GIMPLE_OMP_RETURN, but matches with
8244 GIMPLE_OMP_ATOMIC_LOAD. */
8245 gcc_assert (parent);
8246 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8247 region = parent;
8248 region->exit = bb;
8249 parent = parent->outer;
8252 else if (code == GIMPLE_OMP_CONTINUE)
8254 gcc_assert (parent);
8255 parent->cont = bb;
8257 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8259 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8260 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8263 else if (code == GIMPLE_OMP_TARGET
8264 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8265 new_omp_region (bb, code, parent);
8266 else
8268 /* Otherwise, this directive becomes the parent for a new
8269 region. */
8270 region = new_omp_region (bb, code, parent);
8271 parent = region;
8275 if (single_tree && !parent)
8276 return;
8278 for (son = first_dom_son (CDI_DOMINATORS, bb);
8279 son;
8280 son = next_dom_son (CDI_DOMINATORS, son))
8281 build_omp_regions_1 (son, parent, single_tree);
8284 /* Builds the tree of OMP regions rooted at ROOT, storing it to
8285 root_omp_region. */
8287 static void
8288 build_omp_regions_root (basic_block root)
8290 gcc_assert (root_omp_region == NULL);
8291 build_omp_regions_1 (root, NULL, true);
8292 gcc_assert (root_omp_region != NULL);
8295 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
8297 void
8298 omp_expand_local (basic_block head)
8300 build_omp_regions_root (head);
8301 if (dump_file && (dump_flags & TDF_DETAILS))
8303 fprintf (dump_file, "\nOMP region tree\n\n");
8304 dump_omp_region (dump_file, root_omp_region, 0);
8305 fprintf (dump_file, "\n");
8308 remove_exit_barriers (root_omp_region);
8309 expand_omp (root_omp_region);
8311 free_omp_regions ();
8314 /* Scan the CFG and build a tree of OMP regions, storing its root
8315 in root_omp_region. */
8317 static void
8318 build_omp_regions (void)
8320 gcc_assert (root_omp_region == NULL);
8321 calculate_dominance_info (CDI_DOMINATORS);
8322 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8325 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8327 static unsigned int
8328 execute_expand_omp (void)
8330 build_omp_regions ();
8332 if (!root_omp_region)
8333 return 0;
8335 if (dump_file)
8337 fprintf (dump_file, "\nOMP region tree\n\n");
8338 dump_omp_region (dump_file, root_omp_region, 0);
8339 fprintf (dump_file, "\n");
8342 remove_exit_barriers (root_omp_region);
8344 expand_omp (root_omp_region);
8346 cleanup_tree_cfg ();
8348 free_omp_regions ();
8350 return 0;
8353 /* OMP expansion -- the default pass, run before creation of SSA form. */
8355 namespace {
8357 const pass_data pass_data_expand_omp =
8359 GIMPLE_PASS, /* type */
8360 "ompexp", /* name */
8361 OPTGROUP_NONE, /* optinfo_flags */
8362 true, /* has_execute */
8363 TV_NONE, /* tv_id */
8364 PROP_gimple_any, /* properties_required */
8365 0, /* properties_provided */
8366 0, /* properties_destroyed */
8367 0, /* todo_flags_start */
8368 0, /* todo_flags_finish */
8371 class pass_expand_omp : public gimple_opt_pass
8373 public:
8374 pass_expand_omp (gcc::context *ctxt)
8375 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8378 /* opt_pass methods: */
8379 virtual bool gate (function *)
8381 return ((flag_openmp != 0 || flag_openmp_simd != 0
8382 || flag_cilkplus != 0) && !seen_error ());
8385 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8387 }; // class pass_expand_omp
8389 } // anon namespace
8391 gimple_opt_pass *
8392 make_pass_expand_omp (gcc::context *ctxt)
8394 return new pass_expand_omp (ctxt);
8397 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8399 /* If ctx is a worksharing context inside of a cancellable parallel
8400 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
8401 and a conditional branch to the parallel's cancel_label to handle
8402 cancellation in the implicit barrier. */
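/* Sketch of what is appended below:
     GIMPLE_OMP_RETURN (lhs)
     if (lhs != 0) goto cancel_label; else goto fallthru_label;
     fallthru_label:
   where lhs receives the cancellation flag from the implicit
   barrier's library call, so a cancelled region branches to the
   enclosing parallel's cancellation code.  */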
8404 static void
8405 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8407 gimple omp_return = gimple_seq_last_stmt (*body);
8408 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8409 if (gimple_omp_return_nowait_p (omp_return))
8410 return;
8411 if (ctx->outer
8412 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8413 && ctx->outer->cancellable)
8415 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8416 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8417 tree lhs = create_tmp_var (c_bool_type, NULL);
8418 gimple_omp_return_set_lhs (omp_return, lhs);
8419 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8420 gimple g = gimple_build_cond (NE_EXPR, lhs,
8421 fold_convert (c_bool_type,
8422 boolean_false_node),
8423 ctx->outer->cancel_label, fallthru_label);
8424 gimple_seq_add_stmt (body, g);
8425 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8429 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8430 CTX is the enclosing OMP context for the current statement. */
8432 static void
8433 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8435 tree block, control;
8436 gimple_stmt_iterator tgsi;
8437 gimple stmt, new_stmt, bind, t;
8438 gimple_seq ilist, dlist, olist, new_body;
8440 stmt = gsi_stmt (*gsi_p);
8442 push_gimplify_context ();
8444 dlist = NULL;
8445 ilist = NULL;
8446 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8447 &ilist, &dlist, ctx, NULL);
8449 new_body = gimple_omp_body (stmt);
8450 gimple_omp_set_body (stmt, NULL);
8451 tgsi = gsi_start (new_body);
8452 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8454 omp_context *sctx;
8455 gimple sec_start;
8457 sec_start = gsi_stmt (tgsi);
8458 sctx = maybe_lookup_ctx (sec_start);
8459 gcc_assert (sctx);
8461 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8462 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8463 GSI_CONTINUE_LINKING);
8464 gimple_omp_set_body (sec_start, NULL);
8466 if (gsi_one_before_end_p (tgsi))
8468 gimple_seq l = NULL;
8469 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8470 &l, ctx);
8471 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8472 gimple_omp_section_set_last (sec_start);
8475 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8476 GSI_CONTINUE_LINKING);
8479 block = make_node (BLOCK);
8480 bind = gimple_build_bind (NULL, new_body, block);
8482 olist = NULL;
8483 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8485 block = make_node (BLOCK);
8486 new_stmt = gimple_build_bind (NULL, NULL, block);
8487 gsi_replace (gsi_p, new_stmt, true);
8489 pop_gimplify_context (new_stmt);
8490 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8491 BLOCK_VARS (block) = gimple_bind_vars (bind);
8492 if (BLOCK_VARS (block))
8493 TREE_USED (block) = 1;
8495 new_body = NULL;
8496 gimple_seq_add_seq (&new_body, ilist);
8497 gimple_seq_add_stmt (&new_body, stmt);
8498 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8499 gimple_seq_add_stmt (&new_body, bind);
8501 control = create_tmp_var (unsigned_type_node, ".section");
8502 t = gimple_build_omp_continue (control, control);
8503 gimple_omp_sections_set_control (stmt, control);
8504 gimple_seq_add_stmt (&new_body, t);
8506 gimple_seq_add_seq (&new_body, olist);
8507 if (ctx->cancellable)
8508 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8509 gimple_seq_add_seq (&new_body, dlist);
8511 new_body = maybe_catch_exception (new_body);
8513 t = gimple_build_omp_return
8514 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8515 OMP_CLAUSE_NOWAIT));
8516 gimple_seq_add_stmt (&new_body, t);
8517 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8519 gimple_bind_set_body (new_stmt, new_body);
8523 /* A subroutine of lower_omp_single. Expand the simple form of
8524 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8526 if (GOMP_single_start ())
8527 BODY;
8528 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8530 FIXME. It may be better to delay expanding the logic of this until
8531 pass_expand_omp. The expanded logic may make the job more difficult
8532 for a synchronization analysis pass. */
8534 static void
8535 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
8537 location_t loc = gimple_location (single_stmt);
8538 tree tlabel = create_artificial_label (loc);
8539 tree flabel = create_artificial_label (loc);
8540 gimple call, cond;
8541 tree lhs, decl;
8543 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8544 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8545 call = gimple_build_call (decl, 0);
8546 gimple_call_set_lhs (call, lhs);
8547 gimple_seq_add_stmt (pre_p, call);
8549 cond = gimple_build_cond (EQ_EXPR, lhs,
8550 fold_convert_loc (loc, TREE_TYPE (lhs),
8551 boolean_true_node),
8552 tlabel, flabel);
8553 gimple_seq_add_stmt (pre_p, cond);
8554 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
8555 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8556 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
8560 /* A subroutine of lower_omp_single. Expand the simple form of
8561 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
8563 #pragma omp single copyprivate (a, b, c)
8565 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
8568 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
8570 BODY;
8571 copyout.a = a;
8572 copyout.b = b;
8573 copyout.c = c;
8574 GOMP_single_copy_end (&copyout);
8576 else
8578 a = copyout_p->a;
8579 b = copyout_p->b;
8580 c = copyout_p->c;
8582 GOMP_barrier ();
8585 FIXME. It may be better to delay expanding the logic of this until
8586 pass_expand_omp. The expanded logic may make the job more difficult
8587 for a synchronization analysis pass. */
8589 static void
8590 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
8592 tree ptr_type, t, l0, l1, l2, bfn_decl;
8593 gimple_seq copyin_seq;
8594 location_t loc = gimple_location (single_stmt);
8596 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
8598 ptr_type = build_pointer_type (ctx->record_type);
8599 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
8601 l0 = create_artificial_label (loc);
8602 l1 = create_artificial_label (loc);
8603 l2 = create_artificial_label (loc);
8605 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
8606 t = build_call_expr_loc (loc, bfn_decl, 0);
8607 t = fold_convert_loc (loc, ptr_type, t);
8608 gimplify_assign (ctx->receiver_decl, t, pre_p);
8610 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
8611 build_int_cst (ptr_type, 0));
8612 t = build3 (COND_EXPR, void_type_node, t,
8613 build_and_jump (&l0), build_and_jump (&l1));
8614 gimplify_and_add (t, pre_p);
8616 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
8618 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8620 copyin_seq = NULL;
8621 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
8622 &copyin_seq, ctx);
8624 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
8625 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
8626 t = build_call_expr_loc (loc, bfn_decl, 1, t);
8627 gimplify_and_add (t, pre_p);
8629 t = build_and_jump (&l2);
8630 gimplify_and_add (t, pre_p);
8632 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
8634 gimple_seq_add_seq (pre_p, copyin_seq);
8636 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
8640 /* Expand code for an OpenMP single directive. */
8642 static void
8643 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8645 tree block;
8646 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
8647 gimple_seq bind_body, bind_body_tail = NULL, dlist;
8649 push_gimplify_context ();
8651 block = make_node (BLOCK);
8652 bind = gimple_build_bind (NULL, NULL, block);
8653 gsi_replace (gsi_p, bind, true);
8654 bind_body = NULL;
8655 dlist = NULL;
8656 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
8657 &bind_body, &dlist, ctx, NULL);
8658 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
8660 gimple_seq_add_stmt (&bind_body, single_stmt);
8662 if (ctx->record_type)
8663 lower_omp_single_copy (single_stmt, &bind_body, ctx);
8664 else
8665 lower_omp_single_simple (single_stmt, &bind_body);
8667 gimple_omp_set_body (single_stmt, NULL);
8669 gimple_seq_add_seq (&bind_body, dlist);
8671 bind_body = maybe_catch_exception (bind_body);
8673 t = gimple_build_omp_return
8674 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
8675 OMP_CLAUSE_NOWAIT));
8676 gimple_seq_add_stmt (&bind_body_tail, t);
8677 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
8678 if (ctx->record_type)
8680 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
8681 tree clobber = build_constructor (ctx->record_type, NULL);
8682 TREE_THIS_VOLATILE (clobber) = 1;
8683 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
8684 clobber), GSI_SAME_STMT);
8686 gimple_seq_add_seq (&bind_body, bind_body_tail);
8687 gimple_bind_set_body (bind, bind_body);
8689 pop_gimplify_context (bind);
8691 gimple_bind_append_vars (bind, ctx->block_vars);
8692 BLOCK_VARS (block) = ctx->block_vars;
8693 if (BLOCK_VARS (block))
8694 TREE_USED (block) = 1;
8698 /* Expand code for an OpenMP master directive. */
8700 static void
8701 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8703 tree block, lab = NULL, x, bfn_decl;
8704 gimple stmt = gsi_stmt (*gsi_p), bind;
8705 location_t loc = gimple_location (stmt);
8706 gimple_seq tseq;
8708 push_gimplify_context ();
8710 block = make_node (BLOCK);
8711 bind = gimple_build_bind (NULL, NULL, block);
8712 gsi_replace (gsi_p, bind, true);
8713 gimple_bind_add_stmt (bind, stmt);
8715 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
8716 x = build_call_expr_loc (loc, bfn_decl, 0);
8717 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
8718 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
8719 tseq = NULL;
8720 gimplify_and_add (x, &tseq);
8721 gimple_bind_add_seq (bind, tseq);
8723 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8724 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8725 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8726 gimple_omp_set_body (stmt, NULL);
8728 gimple_bind_add_stmt (bind, gimple_build_label (lab));
8730 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8732 pop_gimplify_context (bind);
8734 gimple_bind_append_vars (bind, ctx->block_vars);
8735 BLOCK_VARS (block) = ctx->block_vars;
8739 /* Expand code for an OpenMP taskgroup directive. */
8741 static void
8742 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8744 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8745 tree block = make_node (BLOCK);
8747 bind = gimple_build_bind (NULL, NULL, block);
8748 gsi_replace (gsi_p, bind, true);
8749 gimple_bind_add_stmt (bind, stmt);
8751 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
8753 gimple_bind_add_stmt (bind, x);
8755 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8756 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8757 gimple_omp_set_body (stmt, NULL);
8759 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8761 gimple_bind_append_vars (bind, ctx->block_vars);
8762 BLOCK_VARS (block) = ctx->block_vars;
8766 /* Expand code for an OpenMP ordered directive. */
8768 static void
8769 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8771 tree block;
8772 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8774 push_gimplify_context ();
8776 block = make_node (BLOCK);
8777 bind = gimple_build_bind (NULL, NULL, block);
8778 gsi_replace (gsi_p, bind, true);
8779 gimple_bind_add_stmt (bind, stmt);
8781 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
8783 gimple_bind_add_stmt (bind, x);
8785 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8786 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8787 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8788 gimple_omp_set_body (stmt, NULL);
8790 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
8791 gimple_bind_add_stmt (bind, x);
8793 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8795 pop_gimplify_context (bind);
8797 gimple_bind_append_vars (bind, ctx->block_vars);
8798 BLOCK_VARS (block) = gimple_bind_vars (bind);
8802 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
8803 substitution of a couple of function calls.  But the NAMED case
8804 requires that languages coordinate a symbol name.  It is therefore
8805 best put here in common code. */
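/* For example, #pragma omp critical (io) serializes on a common
   symbol named
     .gomp_critical_user_io
   built with ACONCAT below, so every translation unit using the same
   critical name contends on the same mutex.  */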
8807 static GTY((param1_is (tree), param2_is (tree)))
8808 splay_tree critical_name_mutexes;
8810 static void
8811 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8813 tree block;
8814 tree name, lock, unlock;
8815 gimple stmt = gsi_stmt (*gsi_p), bind;
8816 location_t loc = gimple_location (stmt);
8817 gimple_seq tbody;
8819 name = gimple_omp_critical_name (stmt);
8820 if (name)
8822 tree decl;
8823 splay_tree_node n;
8825 if (!critical_name_mutexes)
8826 critical_name_mutexes
8827 = splay_tree_new_ggc (splay_tree_compare_pointers,
8828 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
8829 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
8831 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
8832 if (n == NULL)
8834 char *new_str;
8836 decl = create_tmp_var_raw (ptr_type_node, NULL);
8838 new_str = ACONCAT ((".gomp_critical_user_",
8839 IDENTIFIER_POINTER (name), NULL));
8840 DECL_NAME (decl) = get_identifier (new_str);
8841 TREE_PUBLIC (decl) = 1;
8842 TREE_STATIC (decl) = 1;
8843 DECL_COMMON (decl) = 1;
8844 DECL_ARTIFICIAL (decl) = 1;
8845 DECL_IGNORED_P (decl) = 1;
8846 varpool_finalize_decl (decl);
8848 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
8849 (splay_tree_value) decl);
8851 else
8852 decl = (tree) n->value;
8854 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
8855 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
8857 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
8858 unlock = build_call_expr_loc (loc, unlock, 1,
8859 build_fold_addr_expr_loc (loc, decl));
8861 else
8863 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
8864 lock = build_call_expr_loc (loc, lock, 0);
8866 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
8867 unlock = build_call_expr_loc (loc, unlock, 0);
8870 push_gimplify_context ();
8872 block = make_node (BLOCK);
8873 bind = gimple_build_bind (NULL, NULL, block);
8874 gsi_replace (gsi_p, bind, true);
8875 gimple_bind_add_stmt (bind, stmt);
8877 tbody = gimple_bind_body (bind);
8878 gimplify_and_add (lock, &tbody);
8879 gimple_bind_set_body (bind, tbody);
8881 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8882 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8883 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8884 gimple_omp_set_body (stmt, NULL);
8886 tbody = gimple_bind_body (bind);
8887 gimplify_and_add (unlock, &tbody);
8888 gimple_bind_set_body (bind, tbody);
8890 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8892 pop_gimplify_context (bind);
8893 gimple_bind_append_vars (bind, ctx->block_vars);
8894 BLOCK_VARS (block) = gimple_bind_vars (bind);
8898 /* A subroutine of lower_omp_for. Generate code to emit the predicate
8899 for a lastprivate clause. Given a loop control predicate of (V
8900 cond N2), we gate the clause on (!(V cond N2)). The lowered form
8901 is appended to *DLIST, iterator initialization is appended to
8902 *BODY_P. */
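/* E.g., for
     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++) ...
   the copy-out is guarded by (i >= n); when the step is +1 or -1
   this is sharpened below to (i == n), letting value-range
   propagation deduce the final value of the iterator and remove a
   copy.  */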
8904 static void
8905 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
8906 gimple_seq *dlist, struct omp_context *ctx)
8908 tree clauses, cond, vinit;
8909 enum tree_code cond_code;
8910 gimple_seq stmts;
8912 cond_code = fd->loop.cond_code;
8913 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
8915 /* When possible, use a strict equality expression. This can let VRP
8916 type optimizations deduce the value and remove a copy. */
8917 if (tree_fits_shwi_p (fd->loop.step))
8919 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
8920 if (step == 1 || step == -1)
8921 cond_code = EQ_EXPR;
8924 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
8926 clauses = gimple_omp_for_clauses (fd->for_stmt);
8927 stmts = NULL;
8928 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
8929 if (!gimple_seq_empty_p (stmts))
8931 gimple_seq_add_seq (&stmts, *dlist);
8932 *dlist = stmts;
8934 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
8935 vinit = fd->loop.n1;
8936 if (cond_code == EQ_EXPR
8937 && tree_fits_shwi_p (fd->loop.n2)
8938 && ! integer_zerop (fd->loop.n2))
8939 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
8940 else
8941 vinit = unshare_expr (vinit);
8943 /* Initialize the iterator variable, so that threads that don't execute
8944 any iterations don't execute the lastprivate clauses by accident. */
8945 gimplify_assign (fd->loop.v, vinit, body_p);
8950 /* Lower code for an OpenMP loop directive. */
8952 static void
8953 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8955 tree *rhs_p, block;
8956 struct omp_for_data fd, *fdp = NULL;
8957 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
8958 gimple_seq omp_for_body, body, dlist;
8959 size_t i;
8961 push_gimplify_context ();
8963 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
8965 block = make_node (BLOCK);
8966 new_stmt = gimple_build_bind (NULL, NULL, block);
8967 /* Replace at gsi right away, so that 'stmt' is no longer a member
8968 of a sequence, as we're going to add it to a different
8969 one below. */
8970 gsi_replace (gsi_p, new_stmt, true);
8972 /* Move declarations of temporaries out of the loop body before we
8973 make it go away. */
8974 omp_for_body = gimple_omp_body (stmt);
8975 if (!gimple_seq_empty_p (omp_for_body)
8976 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
8978 gimple inner_bind = gimple_seq_first_stmt (omp_for_body);
8979 tree vars = gimple_bind_vars (inner_bind);
8980 gimple_bind_append_vars (new_stmt, vars);
8981 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
8982 keep them on the inner_bind and its block. */
8983 gimple_bind_set_vars (inner_bind, NULL_TREE);
8984 if (gimple_bind_block (inner_bind))
8985 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
8988 if (gimple_omp_for_combined_into_p (stmt))
8990 extract_omp_for_data (stmt, &fd, NULL);
8991 fdp = &fd;
8993 /* We need two temporaries with fd.loop.v type (istart/iend)
8994 and then (fd.collapse - 1) temporaries with the same
8995 type for count2 ... countN-1 vars if not constant. */
8996 size_t count = 2;
8997 tree type = fd.iter_type;
8998 if (fd.collapse > 1
8999 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
9000 count += fd.collapse - 1;
9001 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
9002 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
9003 tree clauses = *pc;
9004 if (parallel_for)
9005 outerc
9006 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
9007 OMP_CLAUSE__LOOPTEMP_);
9008 for (i = 0; i < count; i++)
9010 tree temp;
9011 if (parallel_for)
9013 gcc_assert (outerc);
9014 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
9015 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
9016 OMP_CLAUSE__LOOPTEMP_);
9018 else
9019 temp = create_tmp_var (type, NULL);
9020 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
9021 OMP_CLAUSE_DECL (*pc) = temp;
9022 pc = &OMP_CLAUSE_CHAIN (*pc);
9024 *pc = clauses;
9027 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9028 dlist = NULL;
9029 body = NULL;
9030 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
9031 fdp);
9032 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
9034 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9036 /* Lower the header expressions. At this point, we can assume that
9037 the header is of the form:
9039 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
9041 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
9042 using the .omp_data_s mapping, if needed. */
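/* For instance, if VAL2 was a shared variable rewritten to
   .omp_data_i->n, it is not a gimple invariant, so the loop below
   replaces it with a fresh temporary initialized before the
   GIMPLE_OMP_FOR.  */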
9043 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
9045 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
9046 if (!is_gimple_min_invariant (*rhs_p))
9047 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9049 rhs_p = gimple_omp_for_final_ptr (stmt, i);
9050 if (!is_gimple_min_invariant (*rhs_p))
9051 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9053 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
9054 if (!is_gimple_min_invariant (*rhs_p))
9055 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9058 /* Once lowered, extract the bounds and clauses. */
9059 extract_omp_for_data (stmt, &fd, NULL);
9061 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
9063 gimple_seq_add_stmt (&body, stmt);
9064 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
9066 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
9067 fd.loop.v));
9069 /* After the loop, add exit clauses. */
9070 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
9072 if (ctx->cancellable)
9073 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
9075 gimple_seq_add_seq (&body, dlist);
9077 body = maybe_catch_exception (body);
9079 /* Region exit marker goes at the end of the loop body. */
9080 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
9081 maybe_add_implicit_barrier_cancel (ctx, &body);
9082 pop_gimplify_context (new_stmt);
9084 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9085 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
9086 if (BLOCK_VARS (block))
9087 TREE_USED (block) = 1;
9089 gimple_bind_set_body (new_stmt, body);
9090 gimple_omp_set_body (stmt, NULL);
9091 gimple_omp_for_set_pre_body (stmt, NULL);
9094 /* Callback for walk_stmts. Check if the current statement only contains
9095 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
9097 static tree
9098 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9099 bool *handled_ops_p,
9100 struct walk_stmt_info *wi)
9102 int *info = (int *) wi->info;
9103 gimple stmt = gsi_stmt (*gsi_p);
9105 *handled_ops_p = true;
9106 switch (gimple_code (stmt))
9108 WALK_SUBSTMTS;
9110 case GIMPLE_OMP_FOR:
9111 case GIMPLE_OMP_SECTIONS:
9112 *info = *info == 0 ? 1 : -1;
9113 break;
9114 default:
9115 *info = -1;
9116 break;
9118 return NULL;
9121 struct omp_taskcopy_context
9123 /* This field must be at the beginning, as we do "inheritance": Some
9124 callback functions for tree-inline.c (e.g., omp_copy_decl)
9125 receive a copy_body_data pointer that is up-cast to an
9126 omp_taskcopy_context pointer. */
9127 copy_body_data cb;
9128 omp_context *ctx;
9131 static tree
9132 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9134 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9136 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9137 return create_tmp_var (TREE_TYPE (var), NULL);
9139 return var;
9142 static tree
9143 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9145 tree name, new_fields = NULL, type, f;
9147 type = lang_hooks.types.make_type (RECORD_TYPE);
9148 name = DECL_NAME (TYPE_NAME (orig_type));
9149 name = build_decl (gimple_location (tcctx->ctx->stmt),
9150 TYPE_DECL, name, type);
9151 TYPE_NAME (type) = name;
9153 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9155 tree new_f = copy_node (f);
9156 DECL_CONTEXT (new_f) = type;
9157 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9158 TREE_CHAIN (new_f) = new_fields;
9159 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9160 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9161 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9162 &tcctx->cb, NULL);
9163 new_fields = new_f;
9164 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
9166 TYPE_FIELDS (type) = nreverse (new_fields);
9167 layout_type (type);
9168 return type;
9171 /* Create task copyfn. */
9173 static void
9174 create_task_copyfn (gimple task_stmt, omp_context *ctx)
9176 struct function *child_cfun;
9177 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9178 tree record_type, srecord_type, bind, list;
9179 bool record_needs_remap = false, srecord_needs_remap = false;
9180 splay_tree_node n;
9181 struct omp_taskcopy_context tcctx;
9182 location_t loc = gimple_location (task_stmt);
9184 child_fn = gimple_omp_task_copy_fn (task_stmt);
9185 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9186 gcc_assert (child_cfun->cfg == NULL);
9187 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9189 /* Reset DECL_CONTEXT on function arguments. */
9190 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9191 DECL_CONTEXT (t) = child_fn;
9193 /* Populate the function. */
9194 push_gimplify_context ();
9195 push_cfun (child_cfun);
9197 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9198 TREE_SIDE_EFFECTS (bind) = 1;
9199 list = NULL;
9200 DECL_SAVED_TREE (child_fn) = bind;
9201 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9203 /* Remap src and dst argument types if needed. */
9204 record_type = ctx->record_type;
9205 srecord_type = ctx->srecord_type;
9206 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9207 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9209 record_needs_remap = true;
9210 break;
9212 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9213 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9215 srecord_needs_remap = true;
9216 break;
9219 if (record_needs_remap || srecord_needs_remap)
9221 memset (&tcctx, '\0', sizeof (tcctx));
9222 tcctx.cb.src_fn = ctx->cb.src_fn;
9223 tcctx.cb.dst_fn = child_fn;
9224 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
9225 gcc_checking_assert (tcctx.cb.src_node);
9226 tcctx.cb.dst_node = tcctx.cb.src_node;
9227 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9228 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9229 tcctx.cb.eh_lp_nr = 0;
9230 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9231 tcctx.cb.decl_map = pointer_map_create ();
9232 tcctx.ctx = ctx;
9234 if (record_needs_remap)
9235 record_type = task_copyfn_remap_type (&tcctx, record_type);
9236 if (srecord_needs_remap)
9237 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9239 else
9240 tcctx.cb.decl_map = NULL;
9242 arg = DECL_ARGUMENTS (child_fn);
9243 TREE_TYPE (arg) = build_pointer_type (record_type);
9244 sarg = DECL_CHAIN (arg);
9245 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9247 /* First pass: initialize temporaries used in record_type and srecord_type
9248 sizes and field offsets. */
9249 if (tcctx.cb.decl_map)
9250 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9251 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9253 tree *p;
9255 decl = OMP_CLAUSE_DECL (c);
9256 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
9257 if (p == NULL)
9258 continue;
9259 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9260 sf = (tree) n->value;
9261 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9262 src = build_simple_mem_ref_loc (loc, sarg);
9263 src = omp_build_component_ref (src, sf);
9264 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9265 append_to_statement_list (t, &list);
9268 /* Second pass: copy shared var pointers and copy construct non-VLA
9269 firstprivate vars. */
9270 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9271 switch (OMP_CLAUSE_CODE (c))
9273 case OMP_CLAUSE_SHARED:
9274 decl = OMP_CLAUSE_DECL (c);
9275 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9276 if (n == NULL)
9277 break;
9278 f = (tree) n->value;
9279 if (tcctx.cb.decl_map)
9280 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9281 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9282 sf = (tree) n->value;
9283 if (tcctx.cb.decl_map)
9284 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9285 src = build_simple_mem_ref_loc (loc, sarg);
9286 src = omp_build_component_ref (src, sf);
9287 dst = build_simple_mem_ref_loc (loc, arg);
9288 dst = omp_build_component_ref (dst, f);
9289 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9290 append_to_statement_list (t, &list);
9291 break;
9292 case OMP_CLAUSE_FIRSTPRIVATE:
9293 decl = OMP_CLAUSE_DECL (c);
9294 if (is_variable_sized (decl))
9295 break;
9296 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9297 if (n == NULL)
9298 break;
9299 f = (tree) n->value;
9300 if (tcctx.cb.decl_map)
9301 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9302 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9303 if (n != NULL)
9305 sf = (tree) n->value;
9306 if (tcctx.cb.decl_map)
9307 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9308 src = build_simple_mem_ref_loc (loc, sarg);
9309 src = omp_build_component_ref (src, sf);
9310 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9311 src = build_simple_mem_ref_loc (loc, src);
9313 else
9314 src = decl;
9315 dst = build_simple_mem_ref_loc (loc, arg);
9316 dst = omp_build_component_ref (dst, f);
9317 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9318 append_to_statement_list (t, &list);
9319 break;
9320 case OMP_CLAUSE_PRIVATE:
9321 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9322 break;
9323 decl = OMP_CLAUSE_DECL (c);
9324 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9325 f = (tree) n->value;
9326 if (tcctx.cb.decl_map)
9327 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9328 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9329 if (n != NULL)
9331 sf = (tree) n->value;
9332 if (tcctx.cb.decl_map)
9333 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9334 src = build_simple_mem_ref_loc (loc, sarg);
9335 src = omp_build_component_ref (src, sf);
9336 if (use_pointer_for_field (decl, NULL))
9337 src = build_simple_mem_ref_loc (loc, src);
9339 else
9340 src = decl;
9341 dst = build_simple_mem_ref_loc (loc, arg);
9342 dst = omp_build_component_ref (dst, f);
9343 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9344 append_to_statement_list (t, &list);
9345 break;
9346 default:
9347 break;
9350 /* Last pass: handle VLA firstprivates. */
9351 if (tcctx.cb.decl_map)
9352 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9353 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9355 tree ind, ptr, df;
9357 decl = OMP_CLAUSE_DECL (c);
9358 if (!is_variable_sized (decl))
9359 continue;
9360 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9361 if (n == NULL)
9362 continue;
9363 f = (tree) n->value;
9364 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9365 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9366 ind = DECL_VALUE_EXPR (decl);
9367 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9368 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9369 n = splay_tree_lookup (ctx->sfield_map,
9370 (splay_tree_key) TREE_OPERAND (ind, 0));
9371 sf = (tree) n->value;
9372 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9373 src = build_simple_mem_ref_loc (loc, sarg);
9374 src = omp_build_component_ref (src, sf);
9375 src = build_simple_mem_ref_loc (loc, src);
9376 dst = build_simple_mem_ref_loc (loc, arg);
9377 dst = omp_build_component_ref (dst, f);
9378 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9379 append_to_statement_list (t, &list);
9380 n = splay_tree_lookup (ctx->field_map,
9381 (splay_tree_key) TREE_OPERAND (ind, 0));
9382 df = (tree) n->value;
9383 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
9384 ptr = build_simple_mem_ref_loc (loc, arg);
9385 ptr = omp_build_component_ref (ptr, df);
9386 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9387 build_fold_addr_expr_loc (loc, dst));
9388 append_to_statement_list (t, &list);
9391 t = build1 (RETURN_EXPR, void_type_node, NULL);
9392 append_to_statement_list (t, &list);
9394 if (tcctx.cb.decl_map)
9395 pointer_map_destroy (tcctx.cb.decl_map);
9396 pop_gimplify_context (NULL);
9397 BIND_EXPR_BODY (bind) = list;
9398 pop_cfun ();
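/* An illustrative sketch of the copy function built above (names
   invented, not from this file): for

     struct S { S (); S (const S &); };
     S s;
     #pragma omp task firstprivate (s)

   the generated copyfn behaves roughly like

     void task_cpyfn (struct dst_rec *arg, struct src_rec *sarg)
     {
       arg->s = S (sarg->s);   // copy construct non-VLA firstprivates
     }

   copying from the sender record (*SARG) into the task's private
   record (*ARG); VLA firstprivates are copied in the last pass and
   the corresponding pointer fields in *ARG are redirected to the
   fresh copies.  */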
9401 static void
9402 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9404 tree c, clauses;
9405 gimple g;
9406 size_t n_in = 0, n_out = 0, idx = 2, i;
9408 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9409 OMP_CLAUSE_DEPEND);
9410 gcc_assert (clauses);
9411 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9412 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9413 switch (OMP_CLAUSE_DEPEND_KIND (c))
9415 case OMP_CLAUSE_DEPEND_IN:
9416 n_in++;
9417 break;
9418 case OMP_CLAUSE_DEPEND_OUT:
9419 case OMP_CLAUSE_DEPEND_INOUT:
9420 n_out++;
9421 break;
9422 default:
9423 gcc_unreachable ();
9425 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9426 tree array = create_tmp_var (type, NULL);
9427 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9428 NULL_TREE);
9429 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9430 gimple_seq_add_stmt (iseq, g);
9431 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9432 NULL_TREE);
9433 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9434 gimple_seq_add_stmt (iseq, g);
9435 for (i = 0; i < 2; i++)
9437 if ((i ? n_in : n_out) == 0)
9438 continue;
9439 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9440 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9441 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9443 tree t = OMP_CLAUSE_DECL (c);
9444 t = fold_convert (ptr_type_node, t);
9445 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9446 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9447 NULL_TREE, NULL_TREE);
9448 g = gimple_build_assign (r, t);
9449 gimple_seq_add_stmt (iseq, g);
9452 tree *p = gimple_omp_task_clauses_ptr (stmt);
9453 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9454 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9455 OMP_CLAUSE_CHAIN (c) = *p;
9456 *p = c;
9457 tree clobber = build_constructor (type, NULL);
9458 TREE_THIS_VOLATILE (clobber) = 1;
9459 g = gimple_build_assign (array, clobber);
9460 gimple_seq_add_stmt (oseq, g);
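/* A sketch of the dependence array laid out above (illustrative, not
   taken from this file): for

     #pragma omp task depend(in: a) depend(out: b) depend(inout: c)

   the runtime is handed an array of 2 + 3 pointers along the lines of

     void *deps[5] = { (void *) 3,   // total number of depend clauses
                       (void *) 2,   // number of out/inout clauses
                       &b, &c,       // out/inout addresses come first
                       &a };         // in addresses follow

   a new OMP_CLAUSE_DEPEND whose decl is the address of the array is
   prepended to the task's clause chain, and the array is clobbered
   in *OSEQ once the task region is done with it.  */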
9463 /* Lower the OpenMP parallel or task directive in the current statement
9464 in GSI_P. CTX holds context information for the directive. */
9466 static void
9467 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9469 tree clauses;
9470 tree child_fn, t;
9471 gimple stmt = gsi_stmt (*gsi_p);
9472 gimple par_bind, bind, dep_bind = NULL;
9473 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9474 location_t loc = gimple_location (stmt);
9476 clauses = gimple_omp_taskreg_clauses (stmt);
9477 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9478 par_body = gimple_bind_body (par_bind);
9479 child_fn = ctx->cb.dst_fn;
9480 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9481 && !gimple_omp_parallel_combined_p (stmt))
9483 struct walk_stmt_info wi;
9484 int ws_num = 0;
9486 memset (&wi, 0, sizeof (wi));
9487 wi.info = &ws_num;
9488 wi.val_only = true;
9489 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9490 if (ws_num == 1)
9491 gimple_omp_parallel_set_combined_p (stmt, true);
9493 gimple_seq dep_ilist = NULL;
9494 gimple_seq dep_olist = NULL;
9495 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9496 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9498 push_gimplify_context ();
9499 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9500 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9503 if (ctx->srecord_type)
9504 create_task_copyfn (stmt, ctx);
9506 push_gimplify_context ();
9508 par_olist = NULL;
9509 par_ilist = NULL;
9510 par_rlist = NULL;
9511 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9512 lower_omp (&par_body, ctx);
9513 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9514 lower_reduction_clauses (clauses, &par_rlist, ctx);
9516 /* Declare all the variables created by mapping and the variables
9517 declared in the scope of the parallel body. */
9518 record_vars_into (ctx->block_vars, child_fn);
9519 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9521 if (ctx->record_type)
9523 ctx->sender_decl
9524 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9525 : ctx->record_type, ".omp_data_o");
9526 DECL_NAMELESS (ctx->sender_decl) = 1;
9527 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9528 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9531 olist = NULL;
9532 ilist = NULL;
9533 lower_send_clauses (clauses, &ilist, &olist, ctx);
9534 lower_send_shared_vars (&ilist, &olist, ctx);
9536 if (ctx->record_type)
9538 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
9539 TREE_THIS_VOLATILE (clobber) = 1;
9540 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9541 clobber));
9544 /* Once all the expansions are done, sequence all the different
9545 fragments inside gimple_omp_body. */
9547 new_body = NULL;
9549 if (ctx->record_type)
9551 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9552 /* fixup_child_record_type might have changed receiver_decl's type. */
9553 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9554 gimple_seq_add_stmt (&new_body,
9555 gimple_build_assign (ctx->receiver_decl, t));
9558 gimple_seq_add_seq (&new_body, par_ilist);
9559 gimple_seq_add_seq (&new_body, par_body);
9560 gimple_seq_add_seq (&new_body, par_rlist);
9561 if (ctx->cancellable)
9562 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
9563 gimple_seq_add_seq (&new_body, par_olist);
9564 new_body = maybe_catch_exception (new_body);
9565 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9566 gimple_omp_set_body (stmt, new_body);
9568 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
9569 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
9570 gimple_bind_add_seq (bind, ilist);
9571 gimple_bind_add_stmt (bind, stmt);
9572 gimple_bind_add_seq (bind, olist);
9574 pop_gimplify_context (NULL);
9576 if (dep_bind)
9578 gimple_bind_add_seq (dep_bind, dep_ilist);
9579 gimple_bind_add_stmt (dep_bind, bind);
9580 gimple_bind_add_seq (dep_bind, dep_olist);
9581 pop_gimplify_context (dep_bind);
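/* The resulting statement nesting, sketched for illustration: without
   depend clauses the directive is replaced by

     BIND { ilist; OMP_PARALLEL/OMP_TASK stmt; olist }

   and with depend clauses by

     DEP_BIND { dep_ilist; BIND { ilist; stmt; olist }; dep_olist }

   where the receiver assignment, the lowered body, the reductions,
   the optional cancellation label and the final OMP_RETURN all live
   inside the directive's own gimple_omp_body.  */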
9585 /* Lower the OpenMP target directive in the current statement
9586 in GSI_P. CTX holds context information for the directive. */
9588 static void
9589 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9591 tree clauses;
9592 tree child_fn, t, c;
9593 gimple stmt = gsi_stmt (*gsi_p);
9594 gimple tgt_bind = NULL, bind;
9595 gimple_seq tgt_body = NULL, olist, ilist, new_body;
9596 location_t loc = gimple_location (stmt);
9597 int kind = gimple_omp_target_kind (stmt);
9598 unsigned int map_cnt = 0;
9600 clauses = gimple_omp_target_clauses (stmt);
9601 if (kind == GF_OMP_TARGET_KIND_REGION)
9603 tgt_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9604 tgt_body = gimple_bind_body (tgt_bind);
9606 else if (kind == GF_OMP_TARGET_KIND_DATA)
9607 tgt_body = gimple_omp_body (stmt);
9608 child_fn = ctx->cb.dst_fn;
9610 push_gimplify_context ();
9612 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9613 switch (OMP_CLAUSE_CODE (c))
9615 tree var, x;
9617 default:
9618 break;
9619 case OMP_CLAUSE_MAP:
9620 case OMP_CLAUSE_TO:
9621 case OMP_CLAUSE_FROM:
9622 var = OMP_CLAUSE_DECL (c);
9623 if (!DECL_P (var))
9625 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
9626 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9627 map_cnt++;
9628 continue;
9631 if (DECL_SIZE (var)
9632 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
9634 tree var2 = DECL_VALUE_EXPR (var);
9635 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
9636 var2 = TREE_OPERAND (var2, 0);
9637 gcc_assert (DECL_P (var2));
9638 var = var2;
9641 if (!maybe_lookup_field (var, ctx))
9642 continue;
9644 if (kind == GF_OMP_TARGET_KIND_REGION)
9646 x = build_receiver_ref (var, true, ctx);
9647 tree new_var = lookup_decl (var, ctx);
9648 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9649 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9650 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9651 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
9652 x = build_simple_mem_ref (x);
9653 SET_DECL_VALUE_EXPR (new_var, x);
9654 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
9656 map_cnt++;
9659 if (kind == GF_OMP_TARGET_KIND_REGION)
9661 target_nesting_level++;
9662 lower_omp (&tgt_body, ctx);
9663 target_nesting_level--;
9665 else if (kind == GF_OMP_TARGET_KIND_DATA)
9666 lower_omp (&tgt_body, ctx);
9668 if (kind == GF_OMP_TARGET_KIND_REGION)
9670 /* Declare all the variables created by mapping and the variables
9671 declared in the scope of the target body. */
9672 record_vars_into (ctx->block_vars, child_fn);
9673 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
9676 olist = NULL;
9677 ilist = NULL;
9678 if (ctx->record_type)
9680 ctx->sender_decl
9681 = create_tmp_var (ctx->record_type, ".omp_data_arr");
9682 DECL_NAMELESS (ctx->sender_decl) = 1;
9683 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9684 t = make_tree_vec (3);
9685 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
9686 TREE_VEC_ELT (t, 1)
9687 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
9688 ".omp_data_sizes");
9689 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
9690 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
9691 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
9692 TREE_VEC_ELT (t, 2)
9693 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
9694 map_cnt),
9695 ".omp_data_kinds");
9696 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
9697 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
9698 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
9699 gimple_omp_target_set_data_arg (stmt, t);
9701 vec<constructor_elt, va_gc> *vsize;
9702 vec<constructor_elt, va_gc> *vkind;
9703 vec_alloc (vsize, map_cnt);
9704 vec_alloc (vkind, map_cnt);
9705 unsigned int map_idx = 0;
9707 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9708 switch (OMP_CLAUSE_CODE (c))
9710 tree ovar, nc;
9712 default:
9713 break;
9714 case OMP_CLAUSE_MAP:
9715 case OMP_CLAUSE_TO:
9716 case OMP_CLAUSE_FROM:
9717 nc = c;
9718 ovar = OMP_CLAUSE_DECL (c);
9719 if (!DECL_P (ovar))
9721 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9722 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9724 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
9725 == get_base_address (ovar));
9726 nc = OMP_CLAUSE_CHAIN (c);
9727 ovar = OMP_CLAUSE_DECL (nc);
9729 else
9731 tree x = build_sender_ref (ovar, ctx);
9732 tree v
9733 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
9734 gimplify_assign (x, v, &ilist);
9735 nc = NULL_TREE;
9738 else
9740 if (DECL_SIZE (ovar)
9741 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
9743 tree ovar2 = DECL_VALUE_EXPR (ovar);
9744 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
9745 ovar2 = TREE_OPERAND (ovar2, 0);
9746 gcc_assert (DECL_P (ovar2));
9747 ovar = ovar2;
9749 if (!maybe_lookup_field (ovar, ctx))
9750 continue;
9753 if (nc)
9755 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
9756 tree x = build_sender_ref (ovar, ctx);
9757 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9758 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9759 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9760 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
9762 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9763 tree avar
9764 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
9765 mark_addressable (avar);
9766 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
9767 avar = build_fold_addr_expr (avar);
9768 gimplify_assign (x, avar, &ilist);
9770 else if (is_gimple_reg (var))
9772 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9773 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
9774 mark_addressable (avar);
9775 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
9776 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
9777 gimplify_assign (avar, var, &ilist);
9778 avar = build_fold_addr_expr (avar);
9779 gimplify_assign (x, avar, &ilist);
9780 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
9781 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
9782 && !TYPE_READONLY (TREE_TYPE (var)))
9784 x = build_sender_ref (ovar, ctx);
9785 x = build_simple_mem_ref (x);
9786 gimplify_assign (var, x, &olist);
9789 else
9791 var = build_fold_addr_expr (var);
9792 gimplify_assign (x, var, &ilist);
9795 tree s = OMP_CLAUSE_SIZE (c);
9796 if (s == NULL_TREE)
9797 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
9798 s = fold_convert (size_type_node, s);
9799 tree purpose = size_int (map_idx++);
9800 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
9801 if (TREE_CODE (s) != INTEGER_CST)
9802 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
9804 unsigned char tkind = 0;
9805 switch (OMP_CLAUSE_CODE (c))
9807 case OMP_CLAUSE_MAP:
9808 tkind = OMP_CLAUSE_MAP_KIND (c);
9809 break;
9810 case OMP_CLAUSE_TO:
9811 tkind = OMP_CLAUSE_MAP_TO;
9812 break;
9813 case OMP_CLAUSE_FROM:
9814 tkind = OMP_CLAUSE_MAP_FROM;
9815 break;
9816 default:
9817 gcc_unreachable ();
9819 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
9820 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
9821 talign = DECL_ALIGN_UNIT (ovar);
9822 talign = ceil_log2 (talign);
9823 tkind |= talign << 3;
9824 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
9825 build_int_cst (unsigned_char_type_node,
9826 tkind));
9827 if (nc && nc != c)
9828 c = nc;
9831 gcc_assert (map_idx == map_cnt);
9833 DECL_INITIAL (TREE_VEC_ELT (t, 1))
9834 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
9835 DECL_INITIAL (TREE_VEC_ELT (t, 2))
9836 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
9837 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
9839 gimple_seq initlist = NULL;
9840 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
9841 TREE_VEC_ELT (t, 1)),
9842 &initlist, true, NULL_TREE);
9843 gimple_seq_add_seq (&ilist, initlist);
9845 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
9846 NULL);
9847 TREE_THIS_VOLATILE (clobber) = 1;
9848 gimple_seq_add_stmt (&olist,
9849 gimple_build_assign (TREE_VEC_ELT (t, 1),
9850 clobber));
9853 tree clobber = build_constructor (ctx->record_type, NULL);
9854 TREE_THIS_VOLATILE (clobber) = 1;
9855 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9856 clobber));
9859 /* Once all the expansions are done, sequence all the different
9860 fragments inside gimple_omp_body. */
9862 new_body = NULL;
9864 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
9866 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9867 /* fixup_child_record_type might have changed receiver_decl's type. */
9868 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9869 gimple_seq_add_stmt (&new_body,
9870 gimple_build_assign (ctx->receiver_decl, t));
9873 if (kind == GF_OMP_TARGET_KIND_REGION)
9875 gimple_seq_add_seq (&new_body, tgt_body);
9876 new_body = maybe_catch_exception (new_body);
9878 else if (kind == GF_OMP_TARGET_KIND_DATA)
9879 new_body = tgt_body;
9880 if (kind != GF_OMP_TARGET_KIND_UPDATE)
9882 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9883 gimple_omp_set_body (stmt, new_body);
9886 bind = gimple_build_bind (NULL, NULL,
9887 tgt_bind ? gimple_bind_block (tgt_bind)
9888 : NULL_TREE);
9889 gsi_replace (gsi_p, bind, true);
9890 gimple_bind_add_seq (bind, ilist);
9891 gimple_bind_add_stmt (bind, stmt);
9892 gimple_bind_add_seq (bind, olist);
9894 pop_gimplify_context (NULL);
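/* A sketch of the descriptor built above (illustrative): for a target
   construct mapping N objects, the following are created

     struct .omp_data_arr;              // one address field per object
     size_t .omp_data_sizes[N];         // size in bytes of each object
     unsigned char .omp_data_kinds[N];  // map kind in the low 3 bits,
                                        // ceil_log2 (align) in the rest

   and handed to the runtime via a TREE_VEC stored in the statement's
   data_arg.  The size array stays static initialized data only when
   every mapped size is an INTEGER_CST; otherwise it is initialized
   (and afterwards clobbered) at runtime.  */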
9897 /* Lower code for an OpenMP teams directive. */
9899 static void
9900 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9902 gimple teams_stmt = gsi_stmt (*gsi_p);
9903 push_gimplify_context ();
9905 tree block = make_node (BLOCK);
9906 gimple bind = gimple_build_bind (NULL, NULL, block);
9907 gsi_replace (gsi_p, bind, true);
9908 gimple_seq bind_body = NULL;
9909 gimple_seq dlist = NULL;
9910 gimple_seq olist = NULL;
9912 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9913 OMP_CLAUSE_NUM_TEAMS);
9914 if (num_teams == NULL_TREE)
9915 num_teams = build_int_cst (unsigned_type_node, 0);
9916 else
9918 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
9919 num_teams = fold_convert (unsigned_type_node, num_teams);
9920 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
9922 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9923 OMP_CLAUSE_THREAD_LIMIT);
9924 if (thread_limit == NULL_TREE)
9925 thread_limit = build_int_cst (unsigned_type_node, 0);
9926 else
9928 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
9929 thread_limit = fold_convert (unsigned_type_node, thread_limit);
9930 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
9931 fb_rvalue);
9934 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
9935 &bind_body, &dlist, ctx, NULL);
9936 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
9937 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
9938 gimple_seq_add_stmt (&bind_body, teams_stmt);
9940 location_t loc = gimple_location (teams_stmt);
9941 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
9942 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
9943 gimple_set_location (call, loc);
9944 gimple_seq_add_stmt (&bind_body, call);
9946 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
9947 gimple_omp_set_body (teams_stmt, NULL);
9948 gimple_seq_add_seq (&bind_body, olist);
9949 gimple_seq_add_seq (&bind_body, dlist);
9950 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
9951 gimple_bind_set_body (bind, bind_body);
9953 pop_gimplify_context (bind);
9955 gimple_bind_append_vars (bind, ctx->block_vars);
9956 BLOCK_VARS (block) = ctx->block_vars;
9957 if (BLOCK_VARS (block))
9958 TREE_USED (block) = 1;
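/* Roughly (an illustrative sketch), a directive such as

     #pragma omp teams num_teams (8) thread_limit (16)

   is lowered into a bind of the form

     <input clause setup>
     <teams stmt>
     GOMP_teams (8, 16);
     <lowered body> <reduction stmts> <dlist>
     OMP_RETURN

   with either argument defaulting to 0 when the corresponding clause
   is absent.  */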
9962 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
9963 regimplified. If DATA is non-NULL, lower_omp_1 is outside
9964 of an OpenMP context, but with task_shared_vars set. */
9966 static tree
9967 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
9968 void *data)
9970 tree t = *tp;
9972 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
9973 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9974 return t;
9976 if (task_shared_vars
9977 && DECL_P (t)
9978 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
9979 return t;
9981 /* If a global variable has been privatized, TREE_CONSTANT on
9982 ADDR_EXPR might be wrong. */
9983 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
9984 recompute_tree_invariant_for_addr_expr (t);
9986 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
9987 return NULL_TREE;
9990 static void
9991 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9993 gimple stmt = gsi_stmt (*gsi_p);
9994 struct walk_stmt_info wi;
9996 if (gimple_has_location (stmt))
9997 input_location = gimple_location (stmt);
9999 if (task_shared_vars)
10000 memset (&wi, '\0', sizeof (wi));
10002 /* If we have issued syntax errors, avoid doing any heavy lifting.
10003 Just replace the OpenMP directives with a NOP to avoid
10004 confusing RTL expansion. */
10005 if (seen_error () && is_gimple_omp (stmt))
10007 gsi_replace (gsi_p, gimple_build_nop (), true);
10008 return;
10011 switch (gimple_code (stmt))
10013 case GIMPLE_COND:
10014 if ((ctx || task_shared_vars)
10015 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
10016 ctx ? NULL : &wi, NULL)
10017 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
10018 ctx ? NULL : &wi, NULL)))
10019 gimple_regimplify_operands (stmt, gsi_p);
10020 break;
10021 case GIMPLE_CATCH:
10022 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
10023 break;
10024 case GIMPLE_EH_FILTER:
10025 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
10026 break;
10027 case GIMPLE_TRY:
10028 lower_omp (gimple_try_eval_ptr (stmt), ctx);
10029 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
10030 break;
10031 case GIMPLE_TRANSACTION:
10032 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
10033 break;
10034 case GIMPLE_BIND:
10035 lower_omp (gimple_bind_body_ptr (stmt), ctx);
10036 break;
10037 case GIMPLE_OMP_PARALLEL:
10038 case GIMPLE_OMP_TASK:
10039 ctx = maybe_lookup_ctx (stmt);
10040 gcc_assert (ctx);
10041 if (ctx->cancellable)
10042 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10043 lower_omp_taskreg (gsi_p, ctx);
10044 break;
10045 case GIMPLE_OMP_FOR:
10046 ctx = maybe_lookup_ctx (stmt);
10047 gcc_assert (ctx);
10048 if (ctx->cancellable)
10049 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10050 lower_omp_for (gsi_p, ctx);
10051 break;
10052 case GIMPLE_OMP_SECTIONS:
10053 ctx = maybe_lookup_ctx (stmt);
10054 gcc_assert (ctx);
10055 if (ctx->cancellable)
10056 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10057 lower_omp_sections (gsi_p, ctx);
10058 break;
10059 case GIMPLE_OMP_SINGLE:
10060 ctx = maybe_lookup_ctx (stmt);
10061 gcc_assert (ctx);
10062 lower_omp_single (gsi_p, ctx);
10063 break;
10064 case GIMPLE_OMP_MASTER:
10065 ctx = maybe_lookup_ctx (stmt);
10066 gcc_assert (ctx);
10067 lower_omp_master (gsi_p, ctx);
10068 break;
10069 case GIMPLE_OMP_TASKGROUP:
10070 ctx = maybe_lookup_ctx (stmt);
10071 gcc_assert (ctx);
10072 lower_omp_taskgroup (gsi_p, ctx);
10073 break;
10074 case GIMPLE_OMP_ORDERED:
10075 ctx = maybe_lookup_ctx (stmt);
10076 gcc_assert (ctx);
10077 lower_omp_ordered (gsi_p, ctx);
10078 break;
10079 case GIMPLE_OMP_CRITICAL:
10080 ctx = maybe_lookup_ctx (stmt);
10081 gcc_assert (ctx);
10082 lower_omp_critical (gsi_p, ctx);
10083 break;
10084 case GIMPLE_OMP_ATOMIC_LOAD:
10085 if ((ctx || task_shared_vars)
10086 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
10087 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
10088 gimple_regimplify_operands (stmt, gsi_p);
10089 break;
10090 case GIMPLE_OMP_TARGET:
10091 ctx = maybe_lookup_ctx (stmt);
10092 gcc_assert (ctx);
10093 lower_omp_target (gsi_p, ctx);
10094 break;
10095 case GIMPLE_OMP_TEAMS:
10096 ctx = maybe_lookup_ctx (stmt);
10097 gcc_assert (ctx);
10098 lower_omp_teams (gsi_p, ctx);
10099 break;
10100 case GIMPLE_CALL:
10101 tree fndecl;
10102 fndecl = gimple_call_fndecl (stmt);
10103 if (fndecl
10104 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10105 switch (DECL_FUNCTION_CODE (fndecl))
10107 case BUILT_IN_GOMP_BARRIER:
10108 if (ctx == NULL)
10109 break;
10110 /* FALLTHRU */
10111 case BUILT_IN_GOMP_CANCEL:
10112 case BUILT_IN_GOMP_CANCELLATION_POINT:
10113 omp_context *cctx;
10114 cctx = ctx;
10115 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10116 cctx = cctx->outer;
10117 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
10118 if (!cctx->cancellable)
10120 if (DECL_FUNCTION_CODE (fndecl)
10121 == BUILT_IN_GOMP_CANCELLATION_POINT)
10123 stmt = gimple_build_nop ();
10124 gsi_replace (gsi_p, stmt, false);
10126 break;
10128 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10130 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10131 gimple_call_set_fndecl (stmt, fndecl);
10132 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10134 tree lhs;
10135 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)), NULL);
10136 gimple_call_set_lhs (stmt, lhs);
10137 tree fallthru_label;
10138 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10139 gimple g;
10140 g = gimple_build_label (fallthru_label);
10141 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10142 g = gimple_build_cond (NE_EXPR, lhs,
10143 fold_convert (TREE_TYPE (lhs),
10144 boolean_false_node),
10145 cctx->cancel_label, fallthru_label);
10146 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10147 break;
10148 default:
10149 break;
10151 /* FALLTHRU */
10152 default:
10153 if ((ctx || task_shared_vars)
10154 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10155 ctx ? NULL : &wi))
10157 /* Just remove clobbers; this should happen only if we have
10158 "privatized" local addressable variables in SIMD regions.
10159 The clobber isn't needed in that case, and gimplifying the address
10160 of the ARRAY_REF into a pointer and creating a MEM_REF based
10161 clobber would create worse code than we get with the clobber
10162 dropped. */
10163 if (gimple_clobber_p (stmt))
10165 gsi_replace (gsi_p, gimple_build_nop (), true);
10166 break;
10168 gimple_regimplify_operands (stmt, gsi_p);
10170 break;
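/* For the cancellation handling above, a call such as

     GOMP_barrier ();

   inside a cancellable region is rewritten along the lines of

     tmp = GOMP_barrier_cancel ();
     if (tmp != false) goto cancel_label; else goto fallthru_label;
     fallthru_label:;

   so a barrier that observes a pending cancellation branches straight
   to the region's cancellation label.  (Illustrative pseudo-GIMPLE;
   the temporary and the fallthru label are created above with no
   user-visible names.)  */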
10174 static void
10175 lower_omp (gimple_seq *body, omp_context *ctx)
10177 location_t saved_location = input_location;
10178 gimple_stmt_iterator gsi;
10179 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10180 lower_omp_1 (&gsi, ctx);
10181 /* During gimplification, we have not always invoked fold_stmt
10182 (gimplify.c:maybe_fold_stmt); call it now. */
10183 if (target_nesting_level)
10184 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10185 fold_stmt (&gsi);
10186 input_location = saved_location;
10189 /* Main entry point. */
10191 static unsigned int
10192 execute_lower_omp (void)
10194 gimple_seq body;
10196 /* This pass always runs, to provide PROP_gimple_lomp.
10197 But there is nothing to do unless -fopenmp is given. */
10198 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10199 return 0;
10201 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10202 delete_omp_context);
10204 body = gimple_body (current_function_decl);
10205 scan_omp (&body, NULL);
10206 gcc_assert (taskreg_nesting_level == 0);
10208 if (all_contexts->root)
10210 if (task_shared_vars)
10211 push_gimplify_context ();
10212 lower_omp (&body, NULL);
10213 if (task_shared_vars)
10214 pop_gimplify_context (NULL);
10217 if (all_contexts)
10219 splay_tree_delete (all_contexts);
10220 all_contexts = NULL;
10222 BITMAP_FREE (task_shared_vars);
10223 return 0;
10226 namespace {
10228 const pass_data pass_data_lower_omp =
10230 GIMPLE_PASS, /* type */
10231 "omplower", /* name */
10232 OPTGROUP_NONE, /* optinfo_flags */
10233 true, /* has_execute */
10234 TV_NONE, /* tv_id */
10235 PROP_gimple_any, /* properties_required */
10236 PROP_gimple_lomp, /* properties_provided */
10237 0, /* properties_destroyed */
10238 0, /* todo_flags_start */
10239 0, /* todo_flags_finish */
10242 class pass_lower_omp : public gimple_opt_pass
10244 public:
10245 pass_lower_omp (gcc::context *ctxt)
10246 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10249 /* opt_pass methods: */
10250 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10252 }; // class pass_lower_omp
10254 } // anon namespace
10256 gimple_opt_pass *
10257 make_pass_lower_omp (gcc::context *ctxt)
10259 return new pass_lower_omp (ctxt);
10262 /* The following is a utility to diagnose OpenMP structured block violations.
10263 It is not part of the "omplower" pass, as that's invoked too late. It
10264 should be invoked by the respective front ends after gimplification. */
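/* For example (illustrative), the two walks below reject

     #pragma omp parallel
     {
       goto out;  // error: invalid branch to/from an OpenMP structured block
     }
    out:;

   by first recording each label's innermost OMP context and then
   comparing that context against the context of every branch.  */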
10266 static splay_tree all_labels;
10268 /* Check for mismatched contexts and generate an error if needed. Return
10269 true if an error is detected. */
10271 static bool
10272 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10273 gimple branch_ctx, gimple label_ctx)
10275 if (label_ctx == branch_ctx)
10276 return false;
10280 Previously we kept track of the label's entire context in diagnose_sb_[12]
10281 so we could traverse it and issue a correct "exit" or "enter" error
10282 message upon a structured block violation.
10284 We built the context by building a list with tree_cons'ing, but there is
10285 no easy counterpart in gimple tuples. It seems like far too much work
10286 for issuing exit/enter error messages. If someone really misses the
10287 distinct error message... patches welcome.
10290 #if 0
10291 /* Try to avoid confusing the user by producing an error message
10292 with correct "exit" or "enter" verbiage. We prefer "exit"
10293 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10294 if (branch_ctx == NULL)
10295 exit_p = false;
10296 else
10298 while (label_ctx)
10300 if (TREE_VALUE (label_ctx) == branch_ctx)
10302 exit_p = false;
10303 break;
10305 label_ctx = TREE_CHAIN (label_ctx);
10309 if (exit_p)
10310 error ("invalid exit from OpenMP structured block");
10311 else
10312 error ("invalid entry to OpenMP structured block");
10313 #endif
10315 bool cilkplus_block = false;
10316 if (flag_cilkplus)
10318 if ((branch_ctx
10319 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10320 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10321 || (label_ctx
10322 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10323 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10324 cilkplus_block = true;
10327 /* If it's obvious we have an invalid entry, be specific about the error. */
10328 if (branch_ctx == NULL)
10330 if (cilkplus_block)
10331 error ("invalid entry to Cilk Plus structured block");
10332 else
10333 error ("invalid entry to OpenMP structured block");
10335 else
10337 /* Otherwise, be vague and lazy, but efficient. */
10338 if (cilkplus_block)
10339 error ("invalid branch to/from a Cilk Plus structured block");
10340 else
10341 error ("invalid branch to/from an OpenMP structured block");
10344 gsi_replace (gsi_p, gimple_build_nop (), false);
10345 return true;
10348 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10349 where each label is found. */
10351 static tree
10352 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10353 struct walk_stmt_info *wi)
10355 gimple context = (gimple) wi->info;
10356 gimple inner_context;
10357 gimple stmt = gsi_stmt (*gsi_p);
10359 *handled_ops_p = true;
10361 switch (gimple_code (stmt))
10363 WALK_SUBSTMTS;
10365 case GIMPLE_OMP_PARALLEL:
10366 case GIMPLE_OMP_TASK:
10367 case GIMPLE_OMP_SECTIONS:
10368 case GIMPLE_OMP_SINGLE:
10369 case GIMPLE_OMP_SECTION:
10370 case GIMPLE_OMP_MASTER:
10371 case GIMPLE_OMP_ORDERED:
10372 case GIMPLE_OMP_CRITICAL:
10373 case GIMPLE_OMP_TARGET:
10374 case GIMPLE_OMP_TEAMS:
10375 case GIMPLE_OMP_TASKGROUP:
10376 /* The minimal context here is just the current OMP construct. */
10377 inner_context = stmt;
10378 wi->info = inner_context;
10379 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10380 wi->info = context;
10381 break;
10383 case GIMPLE_OMP_FOR:
10384 inner_context = stmt;
10385 wi->info = inner_context;
10386 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10387 walk them. */
10388 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10389 diagnose_sb_1, NULL, wi);
10390 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10391 wi->info = context;
10392 break;
10394 case GIMPLE_LABEL:
10395 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
10396 (splay_tree_value) context);
10397 break;
10399 default:
10400 break;
10403 return NULL_TREE;
10406 /* Pass 2: Check each branch and see if its context differs from that of
10407 the destination label's context. */
10409 static tree
10410 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10411 struct walk_stmt_info *wi)
10413 gimple context = (gimple) wi->info;
10414 splay_tree_node n;
10415 gimple stmt = gsi_stmt (*gsi_p);
10417 *handled_ops_p = true;
10419 switch (gimple_code (stmt))
10421 WALK_SUBSTMTS;
10423 case GIMPLE_OMP_PARALLEL:
10424 case GIMPLE_OMP_TASK:
10425 case GIMPLE_OMP_SECTIONS:
10426 case GIMPLE_OMP_SINGLE:
10427 case GIMPLE_OMP_SECTION:
10428 case GIMPLE_OMP_MASTER:
10429 case GIMPLE_OMP_ORDERED:
10430 case GIMPLE_OMP_CRITICAL:
10431 case GIMPLE_OMP_TARGET:
10432 case GIMPLE_OMP_TEAMS:
10433 case GIMPLE_OMP_TASKGROUP:
10434 wi->info = stmt;
10435 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10436 wi->info = context;
10437 break;
10439 case GIMPLE_OMP_FOR:
10440 wi->info = stmt;
10441 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10442 walk them. */
10443 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10444 diagnose_sb_2, NULL, wi);
10445 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10446 wi->info = context;
10447 break;
10449 case GIMPLE_COND:
10451 tree lab = gimple_cond_true_label (stmt);
10452 if (lab)
10454 n = splay_tree_lookup (all_labels,
10455 (splay_tree_key) lab);
10456 diagnose_sb_0 (gsi_p, context,
10457 n ? (gimple) n->value : NULL);
10459 lab = gimple_cond_false_label (stmt);
10460 if (lab)
10462 n = splay_tree_lookup (all_labels,
10463 (splay_tree_key) lab);
10464 diagnose_sb_0 (gsi_p, context,
10465 n ? (gimple) n->value : NULL);
10468 break;
10470 case GIMPLE_GOTO:
10472 tree lab = gimple_goto_dest (stmt);
10473 if (TREE_CODE (lab) != LABEL_DECL)
10474 break;
10476 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10477 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10479 break;
10481 case GIMPLE_SWITCH:
10483 unsigned int i;
10484 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
10486 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
10487 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10488 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10489 break;
10492 break;
10494 case GIMPLE_RETURN:
10495 diagnose_sb_0 (gsi_p, context, NULL);
10496 break;
10498 default:
10499 break;
10502 return NULL_TREE;
10505 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10506 codes. */
10507 bool
10508 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
10509 int *region_idx)
10511 gimple last = last_stmt (bb);
10512 enum gimple_code code = gimple_code (last);
10513 struct omp_region *cur_region = *region;
10514 bool fallthru = false;
10516 switch (code)
10518 case GIMPLE_OMP_PARALLEL:
10519 case GIMPLE_OMP_TASK:
10520 case GIMPLE_OMP_FOR:
10521 case GIMPLE_OMP_SINGLE:
10522 case GIMPLE_OMP_TEAMS:
10523 case GIMPLE_OMP_MASTER:
10524 case GIMPLE_OMP_TASKGROUP:
10525 case GIMPLE_OMP_ORDERED:
10526 case GIMPLE_OMP_CRITICAL:
10527 case GIMPLE_OMP_SECTION:
10528 cur_region = new_omp_region (bb, code, cur_region);
10529 fallthru = true;
10530 break;
10532 case GIMPLE_OMP_TARGET:
10533 cur_region = new_omp_region (bb, code, cur_region);
10534 fallthru = true;
10535 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
10536 cur_region = cur_region->outer;
10537 break;
10539 case GIMPLE_OMP_SECTIONS:
10540 cur_region = new_omp_region (bb, code, cur_region);
10541 fallthru = true;
10542 break;
10544 case GIMPLE_OMP_SECTIONS_SWITCH:
10545 fallthru = false;
10546 break;
10548 case GIMPLE_OMP_ATOMIC_LOAD:
10549 case GIMPLE_OMP_ATOMIC_STORE:
10550 fallthru = true;
10551 break;
10553 case GIMPLE_OMP_RETURN:
10554 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
10555 somewhere other than the next block. This will be
10556 created later. */
10557 cur_region->exit = bb;
10558 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
10559 cur_region = cur_region->outer;
10560 break;
10562 case GIMPLE_OMP_CONTINUE:
10563 cur_region->cont = bb;
10564 switch (cur_region->type)
10566 case GIMPLE_OMP_FOR:
10567 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
10568 succs edges as abnormal to prevent splitting
10569 them. */
10570 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
10571 /* Make the loopback edge. */
10572 make_edge (bb, single_succ (cur_region->entry),
10573 EDGE_ABNORMAL);
10575 /* Create an edge from GIMPLE_OMP_FOR to exit, which
10576 corresponds to the case that the body of the loop
10577 is not executed at all. */
10578 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
10579 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
10580 fallthru = false;
10581 break;
10583 case GIMPLE_OMP_SECTIONS:
10584 /* Wire up the edges into and out of the nested sections. */
10586 basic_block switch_bb = single_succ (cur_region->entry);
10588 struct omp_region *i;
10589 for (i = cur_region->inner; i ; i = i->next)
10591 gcc_assert (i->type == GIMPLE_OMP_SECTION);
10592 make_edge (switch_bb, i->entry, 0);
10593 make_edge (i->exit, bb, EDGE_FALLTHRU);
10596 /* Make the loopback edge to the block with
10597 GIMPLE_OMP_SECTIONS_SWITCH. */
10598 make_edge (bb, switch_bb, 0);
10600 /* Make the edge from the switch to exit. */
10601 make_edge (switch_bb, bb->next_bb, 0);
10602 fallthru = false;
10604 break;
10606 default:
10607 gcc_unreachable ();
10609 break;
10611 default:
10612 gcc_unreachable ();
10615 if (*region != cur_region)
10617 *region = cur_region;
10618 if (cur_region)
10619 *region_idx = cur_region->entry->index;
10620 else
10621 *region_idx = 0;
10624 return fallthru;
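/* As an illustration of the GIMPLE_OMP_CONTINUE handling above, a
   lowered OMP_FOR region ends up with these edges (the abnormal flag
   keeps the blocks from being split):

     entry bb (OMP_FOR)     --> loop body         (marked abnormal)
     entry bb (OMP_FOR)     --> bb after CONTINUE (zero-trip case)
     cont bb (OMP_CONTINUE) --> loop body         (the loopback edge)
     cont bb (OMP_CONTINUE) --> bb after CONTINUE (fallthru + abnormal)
*/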
10627 static unsigned int
10628 diagnose_omp_structured_block_errors (void)
10630 struct walk_stmt_info wi;
10631 gimple_seq body = gimple_body (current_function_decl);
10633 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
10635 memset (&wi, 0, sizeof (wi));
10636 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
10638 memset (&wi, 0, sizeof (wi));
10639 wi.want_locations = true;
10640 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
10642 gimple_set_body (current_function_decl, body);
10644 splay_tree_delete (all_labels);
10645 all_labels = NULL;
10647 return 0;
10650 namespace {
10652 const pass_data pass_data_diagnose_omp_blocks =
10654 GIMPLE_PASS, /* type */
10655 "*diagnose_omp_blocks", /* name */
10656 OPTGROUP_NONE, /* optinfo_flags */
10657 true, /* has_execute */
10658 TV_NONE, /* tv_id */
10659 PROP_gimple_any, /* properties_required */
10660 0, /* properties_provided */
10661 0, /* properties_destroyed */
10662 0, /* todo_flags_start */
10663 0, /* todo_flags_finish */
10666 class pass_diagnose_omp_blocks : public gimple_opt_pass
10668 public:
10669 pass_diagnose_omp_blocks (gcc::context *ctxt)
10670 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
10673 /* opt_pass methods: */
10674 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
10675 virtual unsigned int execute (function *)
10677 return diagnose_omp_structured_block_errors ();
10680 }; // class pass_diagnose_omp_blocks
10682 } // anon namespace
10684 gimple_opt_pass *
10685 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
10687 return new pass_diagnose_omp_blocks (ctxt);
10690 /* SIMD clone supporting code. */
10692 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
10693 of arguments to reserve space for. */
10695 static struct cgraph_simd_clone *
10696 simd_clone_struct_alloc (int nargs)
10698 struct cgraph_simd_clone *clone_info;
10699 size_t len = (sizeof (struct cgraph_simd_clone)
10700 + nargs * sizeof (struct cgraph_simd_clone_arg));
10701 clone_info = (struct cgraph_simd_clone *)
10702 ggc_internal_cleared_alloc (len);
10703 return clone_info;
10706 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
10708 static inline void
10709 simd_clone_struct_copy (struct cgraph_simd_clone *to,
10710 struct cgraph_simd_clone *from)
10712 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
10713 + ((from->nargs - from->inbranch)
10714 * sizeof (struct cgraph_simd_clone_arg))));
10717 /* Return vector of parameter types of function FNDECL. This uses
10718 TYPE_ARG_TYPES if available, otherwise falls back to the types
10719 of the DECL_ARGUMENTS entries. */
10721 vec<tree>
10722 simd_clone_vector_of_formal_parm_types (tree fndecl)
10724 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
10725 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
10726 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
10727 unsigned int i;
10728 tree arg;
10729 FOR_EACH_VEC_ELT (args, i, arg)
10730 args[i] = TREE_TYPE (args[i]);
10731 return args;
10734 /* Given a simd function in NODE, extract the simd specific
10735 information from the OMP clauses passed in CLAUSES, and return
10736 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
10737 is set to TRUE if the `inbranch' or `notinbranch' clause is specified,
10738 otherwise set to FALSE. */
10740 static struct cgraph_simd_clone *
10741 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
10742 bool *inbranch_specified)
10744 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
10745 tree t;
10746 int n;
10747 *inbranch_specified = false;
10749 n = args.length ();
10750 if (n > 0 && args.last () == void_type_node)
10751 n--;
10753 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
10754 be cloned have a distinctive artificial attribute in addition to "omp
10755 declare simd". */
10756 bool cilk_clone
10757 = (flag_cilkplus
10758 && lookup_attribute ("cilk simd function",
10759 DECL_ATTRIBUTES (node->decl)));
10761 /* Allocate one more than needed just in case this is an in-branch
10762 clone which will require a mask argument. */
10763 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
10764 clone_info->nargs = n;
10765 clone_info->cilk_elemental = cilk_clone;
10767 if (!clauses)
10769 args.release ();
10770 return clone_info;
10772 clauses = TREE_VALUE (clauses);
10773 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
10774 return clone_info;
10776 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
10778 switch (OMP_CLAUSE_CODE (t))
10780 case OMP_CLAUSE_INBRANCH:
10781 clone_info->inbranch = 1;
10782 *inbranch_specified = true;
10783 break;
10784 case OMP_CLAUSE_NOTINBRANCH:
10785 clone_info->inbranch = 0;
10786 *inbranch_specified = true;
10787 break;
10788 case OMP_CLAUSE_SIMDLEN:
10789 clone_info->simdlen
10790 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
10791 break;
10792 case OMP_CLAUSE_LINEAR:
10794 tree decl = OMP_CLAUSE_DECL (t);
10795 tree step = OMP_CLAUSE_LINEAR_STEP (t);
10796 int argno = TREE_INT_CST_LOW (decl);
10797 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
10799 clone_info->args[argno].arg_type
10800 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
10801 clone_info->args[argno].linear_step = tree_to_shwi (step);
10802 gcc_assert (clone_info->args[argno].linear_step >= 0
10803 && clone_info->args[argno].linear_step < n);
10805 else
10807 if (POINTER_TYPE_P (args[argno]))
10808 step = fold_convert (ssizetype, step);
10809 if (!tree_fits_shwi_p (step))
10811 warning_at (OMP_CLAUSE_LOCATION (t), 0,
10812 "ignoring large linear step");
10813 args.release ();
10814 return NULL;
10816 else if (integer_zerop (step))
10818 warning_at (OMP_CLAUSE_LOCATION (t), 0,
10819 "ignoring zero linear step");
10820 args.release ();
10821 return NULL;
10823 else
10825 clone_info->args[argno].arg_type
10826 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
10827 clone_info->args[argno].linear_step = tree_to_shwi (step);
10830 break;
10832 case OMP_CLAUSE_UNIFORM:
10834 tree decl = OMP_CLAUSE_DECL (t);
10835 int argno = tree_to_uhwi (decl);
10836 clone_info->args[argno].arg_type
10837 = SIMD_CLONE_ARG_TYPE_UNIFORM;
10838 break;
10840 case OMP_CLAUSE_ALIGNED:
10842 tree decl = OMP_CLAUSE_DECL (t);
10843 int argno = tree_to_uhwi (decl);
10844 clone_info->args[argno].alignment
10845 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
10846 break;
10848 default:
10849 break;
10852 args.release ();
10853 return clone_info;
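/* For instance (an illustrative sketch), given

     #pragma omp declare simd simdlen(4) uniform(a) linear(b:1) notinbranch
     int foo (int a, int *b, float c);

   the extraction above yields clone_info->simdlen == 4,
   clone_info->inbranch == 0 (with *INBRANCH_SPECIFIED set to true),
   args[0].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM, args[1].arg_type
   == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with linear_step 1, and
   args[2] left as the zero-initialized default (a vector argument).  */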
10856 /* Given a SIMD clone in NODE, calculate the characteristic data
10857 type and return the corresponding type. The characteristic data
10858 type is computed as described in the Intel Vector ABI. */
10860 static tree
10861 simd_clone_compute_base_data_type (struct cgraph_node *node,
10862 struct cgraph_simd_clone *clone_info)
10864 tree type = integer_type_node;
10865 tree fndecl = node->decl;
10867 /* a) For non-void function, the characteristic data type is the
10868 return type. */
10869 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
10870 type = TREE_TYPE (TREE_TYPE (fndecl));
10872 /* b) If the function has any non-uniform, non-linear parameters,
10873 then the characteristic data type is the type of the first
10874 such parameter. */
10875 else
10877 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
10878 for (unsigned int i = 0; i < clone_info->nargs; ++i)
10879 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
10881 type = map[i];
10882 break;
10884 map.release ();
10887 /* c) If the characteristic data type determined by a) or b) above
10888 is a struct, union, or class type that is passed by value (except
10889 for the type that maps to the built-in complex data type), the
10890 characteristic data type is int. */
10891 if (RECORD_OR_UNION_TYPE_P (type)
10892 && !aggregate_value_p (type, NULL)
10893 && TREE_CODE (type) != COMPLEX_TYPE)
10894 return integer_type_node;
10896 /* d) If none of the above three classes is applicable, the
10897 characteristic data type is int. */
10899 return type;
10901 /* e) For Intel Xeon Phi native and offload compilation, if the
10902 resulting characteristic data type is 8-bit or 16-bit integer
10903 data type, the characteristic data type is int. */
10904 /* Well, we don't handle Xeon Phi yet. */
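/* Mangle the name of the SIMD clone per the vector ABI implemented
   below: "_ZGV", the target's vecsize_mangle letter, 'M' or 'N' for
   masked/unmasked, the simdlen, one code per argument ('u' uniform,
   'l'/'n'/'s' for the linear variants, 'v' vector, with 'a<align>'
   appended for aligned arguments), '_', and the original assembler
   name.  For example (illustrative),

     #pragma omp declare simd simdlen(4) uniform(a) aligned(a:32) linear(b)
     int foo (int *a, int b, int c);

   would mangle as "_ZGVxN4ua32lv_foo", with 'x' standing in for the
   target-chosen vecsize_mangle letter.  Returns NULL_TREE if a clone
   with the same mangled name already exists.  */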
10907 static tree
10908 simd_clone_mangle (struct cgraph_node *node,
10909 struct cgraph_simd_clone *clone_info)
10911 char vecsize_mangle = clone_info->vecsize_mangle;
10912 char mask = clone_info->inbranch ? 'M' : 'N';
10913 unsigned int simdlen = clone_info->simdlen;
10914 unsigned int n;
10915 pretty_printer pp;
10917 gcc_assert (vecsize_mangle && simdlen);
10919 pp_string (&pp, "_ZGV");
10920 pp_character (&pp, vecsize_mangle);
10921 pp_character (&pp, mask);
10922 pp_decimal_int (&pp, simdlen);
10924 for (n = 0; n < clone_info->nargs; ++n)
10926 struct cgraph_simd_clone_arg arg = clone_info->args[n];
10928 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
10929 pp_character (&pp, 'u');
10930 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
10932 gcc_assert (arg.linear_step != 0);
10933 pp_character (&pp, 'l');
10934 if (arg.linear_step > 1)
10935 pp_unsigned_wide_integer (&pp, arg.linear_step);
10936 else if (arg.linear_step < 0)
10938 pp_character (&pp, 'n');
10939 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
10940 arg.linear_step));
10943 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
10945 pp_character (&pp, 's');
10946 pp_unsigned_wide_integer (&pp, arg.linear_step);
10948 else
10949 pp_character (&pp, 'v');
10950 if (arg.alignment)
10952 pp_character (&pp, 'a');
10953 pp_decimal_int (&pp, arg.alignment);
10957 pp_underscore (&pp);
10958 pp_string (&pp,
10959 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
10960 const char *str = pp_formatted_text (&pp);
10962 /* If there already is a SIMD clone with the same mangled name, don't
10963 add another one. This can happen e.g. for
10964 #pragma omp declare simd
10965 #pragma omp declare simd simdlen(8)
10966 int foo (int, int);
10967 if the simdlen is assumed to be 8 for the first one, etc. */
10968 for (struct cgraph_node *clone = node->simd_clones; clone;
10969 clone = clone->simdclone->next_clone)
10970 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
10971 str) == 0)
10972 return NULL_TREE;
10974 return get_identifier (str);
10977 /* Create a simd clone of OLD_NODE and return it. */
10979 static struct cgraph_node *
10980 simd_clone_create (struct cgraph_node *old_node)
10982 struct cgraph_node *new_node;
10983 if (old_node->definition)
10985 if (!cgraph_function_with_gimple_body_p (old_node))
10986 return NULL;
10987 cgraph_get_body (old_node);
10988 new_node = cgraph_function_versioning (old_node, vNULL, NULL, NULL,
10989 false, NULL, NULL, "simdclone");
10991 else
10993 tree old_decl = old_node->decl;
10994 tree new_decl = copy_node (old_node->decl);
10995 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
10996 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
10997 SET_DECL_RTL (new_decl, NULL);
10998 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
10999 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
11000 new_node
11001 = cgraph_copy_node_for_versioning (old_node, new_decl, vNULL, NULL);
11002 cgraph_call_function_insertion_hooks (new_node);
11004 if (new_node == NULL)
11005 return new_node;
11007 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
11009 /* The function cgraph_function_versioning () will force the new
11010 symbol local. Undo this, and inherit external visibility from
11011 the old node. */
11012 new_node->local.local = old_node->local.local;
11013 new_node->externally_visible = old_node->externally_visible;
11015 return new_node;
11018 /* Adjust the return type of the given function to its appropriate
11019 vector counterpart. Returns a simd array to be used throughout the
11020 function as a return value. */
11022 static tree
11023 simd_clone_adjust_return_type (struct cgraph_node *node)
11025 tree fndecl = node->decl;
11026 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
11027 unsigned int veclen;
11028 tree t;
11030 /* Adjust the function return type. */
11031 if (orig_rettype == void_type_node)
11032 return NULL_TREE;
11033 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
11034 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
11035 || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
11036 veclen = node->simdclone->vecsize_int;
11037 else
11038 veclen = node->simdclone->vecsize_float;
11039 veclen /= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))));
11040 if (veclen > node->simdclone->simdlen)
11041 veclen = node->simdclone->simdlen;
11042 if (veclen == node->simdclone->simdlen)
11043 TREE_TYPE (TREE_TYPE (fndecl))
11044 = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)),
11045 node->simdclone->simdlen);
11046 else
11048 t = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)), veclen);
11049 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
11050 TREE_TYPE (TREE_TYPE (fndecl)) = t;
11052 if (!node->definition)
11053 return NULL_TREE;
11055 t = DECL_RESULT (fndecl);
11056 /* Adjust the DECL_RESULT. */
11057 gcc_assert (TREE_TYPE (t) != void_type_node);
11058 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
11059 relayout_decl (t);
11061 tree atype = build_array_type_nelts (orig_rettype,
11062 node->simdclone->simdlen);
11063 if (veclen != node->simdclone->simdlen)
11064 return build1 (VIEW_CONVERT_EXPR, atype, t);
11066 /* Set up a SIMD array to use as the return value. */
11067 tree retval = create_tmp_var_raw (atype, "retval");
11068 gimple_add_tmp_var (retval);
11069 return retval;
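/* A worked instance of the arithmetic above (illustrative): for an
   int-returning clone with simdlen 8 and vecsize_int 256, veclen is
   256 / 32 == 8 == simdlen, so the return type simply becomes
   vector(8) int.  With vecsize_int 128 instead, veclen is 4 < 8, the
   return type becomes an array of 8/4 == 2 vector(4) int, and a
   VIEW_CONVERT_EXPR of DECL_RESULT to int[8] is used as the simd
   return array instead of a fresh "retval" temporary.  */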
11072 /* Each vector argument has a corresponding array to be used locally
11073 as part of the eventual loop. Create such a temporary array and
11074 return it.
11076 PREFIX is the prefix to be used for the temporary.
11078 TYPE is the inner element type.
11080 SIMDLEN is the number of elements. */
11082 static tree
11083 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
11085 tree atype = build_array_type_nelts (type, simdlen);
11086 tree avar = create_tmp_var_raw (atype, prefix);
11087 gimple_add_tmp_var (avar);
11088 return avar;
11091 /* Modify the function argument types to their corresponding vector
11092 counterparts if appropriate. Also, create one array for each simd
11093 argument to be used locally when using the function arguments as
11094 part of the loop.
11096 NODE is the function whose arguments are to be adjusted.
11098 Returns an adjustment vector describing how the argument types
11099 have been adjusted. */
11101 static ipa_parm_adjustment_vec
11102 simd_clone_adjust_argument_types (struct cgraph_node *node)
11104 vec<tree> args;
11105 ipa_parm_adjustment_vec adjustments;
11107 if (node->definition)
11108 args = ipa_get_vector_of_formal_parms (node->decl);
11109 else
11110 args = simd_clone_vector_of_formal_parm_types (node->decl);
11111 adjustments.create (args.length ());
11112 unsigned i, j, veclen;
11113 struct ipa_parm_adjustment adj;
11114 for (i = 0; i < node->simdclone->nargs; ++i)
11116 memset (&adj, 0, sizeof (adj));
11117 tree parm = args[i];
11118 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
11119 adj.base_index = i;
11120 adj.base = parm;
11122 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
11123 node->simdclone->args[i].orig_type = parm_type;
11125 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
11127 /* No adjustment necessary for scalar arguments. */
11128 adj.op = IPA_PARM_OP_COPY;
11130 else
11132 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
11133 veclen = node->simdclone->vecsize_int;
11134 else
11135 veclen = node->simdclone->vecsize_float;
11136 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
11137 if (veclen > node->simdclone->simdlen)
11138 veclen = node->simdclone->simdlen;
11139 adj.arg_prefix = "simd";
11140 adj.type = build_vector_type (parm_type, veclen);
11141 node->simdclone->args[i].vector_type = adj.type;
11142 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11144 adjustments.safe_push (adj);
11145 if (j == veclen)
11147 memset (&adj, 0, sizeof (adj));
11148 adj.op = IPA_PARM_OP_NEW;
11149 adj.arg_prefix = "simd";
11150 adj.base_index = i;
11151 adj.type = node->simdclone->args[i].vector_type;
11155 if (node->definition)
11156 node->simdclone->args[i].simd_array
11157 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
11158 parm_type, node->simdclone->simdlen);
11160 adjustments.safe_push (adj);
11163 if (node->simdclone->inbranch)
11165 tree base_type
11166 = simd_clone_compute_base_data_type (node->simdclone->origin,
11167 node->simdclone);
11169 memset (&adj, 0, sizeof (adj));
11170 adj.op = IPA_PARM_OP_NEW;
11171 adj.arg_prefix = "mask";
11173 adj.base_index = i;
11174 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
11175 veclen = node->simdclone->vecsize_int;
11176 else
11177 veclen = node->simdclone->vecsize_float;
11178 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
11179 if (veclen > node->simdclone->simdlen)
11180 veclen = node->simdclone->simdlen;
11181 adj.type = build_vector_type (base_type, veclen);
11182 adjustments.safe_push (adj);
11184 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11185 adjustments.safe_push (adj);
11187 /* We have previously allocated one extra entry for the mask. Use
11188 it and fill it. */
11189 struct cgraph_simd_clone *sc = node->simdclone;
11190 sc->nargs++;
11191 if (node->definition)
11193 sc->args[i].orig_arg
11194 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
11195 sc->args[i].simd_array
11196 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
11198 sc->args[i].orig_type = base_type;
11199 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;

  if (node->definition)
    ipa_modify_formal_parameters (node->decl, adjustments);
  else
    {
      tree new_arg_types = NULL_TREE, new_reversed;
      bool last_parm_void = false;
      if (args.length () > 0 && args.last () == void_type_node)
        last_parm_void = true;

      gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
      j = adjustments.length ();
      for (i = 0; i < j; i++)
        {
          struct ipa_parm_adjustment *adj = &adjustments[i];
          tree ptype;
          if (adj->op == IPA_PARM_OP_COPY)
            ptype = args[adj->base_index];
          else
            ptype = adj->type;
          new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
        }
      new_reversed = nreverse (new_arg_types);
      if (last_parm_void)
        {
          if (new_reversed)
            TREE_CHAIN (new_arg_types) = void_list_node;
          else
            new_reversed = void_list_node;
        }

      tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
      TYPE_ARG_TYPES (new_type) = new_reversed;
      TREE_TYPE (node->decl) = new_type;

      adjustments.release ();
    }
  args.release ();
  return adjustments;
}

/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.  */
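
/* For instance, if an "int" argument was widened to two "vector(4) int"
   parameters for a simdlen-8 clone, the sequence built here stores both
   of them back into the argument's 8-element simd array, roughly

     MEM[(vector(4) int *) &x.simdarray] = x.0;
     MEM[(vector(4) int *) &x.simdarray + 16B] = x.1;

   where the 16-byte offset is k * elemsize for the second chunk.  */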

static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
                             ipa_parm_adjustment_vec adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      if (adjustments[j].op == IPA_PARM_OP_COPY)
        continue;

      node->simdclone->args[i].vector_arg = arg;

      tree array = node->simdclone->args[i].simd_array;
      if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
        {
          tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
          tree ptr = build_fold_addr_expr (array);
          tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
                           build_int_cst (ptype, 0));
          t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
          gimplify_and_add (t, &seq);
        }
      else
        {
          unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
          tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
          for (k = 0; k < node->simdclone->simdlen; k += simdlen)
            {
              tree ptr = build_fold_addr_expr (array);
              int elemsize;
              if (k)
                {
                  arg = DECL_CHAIN (arg);
                  j++;
                }
              elemsize
                = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
              tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
                               build_int_cst (ptype, k * elemsize));
              t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
              gimplify_and_add (t, &seq);
            }
        }
    }
  return seq;
}

/* Callback info for ipa_simd_modify_stmt_ops below.  */

struct modify_stmt_info {
  ipa_parm_adjustment_vec adjustments;
  gimple stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};

/* Callback for walk_gimple_op.

   Adjust operands from a given statement as specified in the
   adjustments vector in the callback data.  */
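
/* A use of a replaced PARM_DECL is rewritten to the element of the
   corresponding simd array selected by the current iteration, i.e.
   "x" becomes roughly "x.simdarray[iter]"; when the use sits under an
   ADDR_EXPR, the address of that element is computed in a separate
   statement inserted before the current one instead.  */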

static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  struct ipa_parm_adjustment *cand = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
  else
    {
      if (TYPE_P (*tp))
        *walk_subtrees = 0;
    }

  tree repl = NULL_TREE;
  if (cand)
    repl = unshare_expr (cand->new_decl);
  else
    {
      if (tp != orig_tp)
        {
          *walk_subtrees = 0;
          bool modified = info->modified;
          info->modified = false;
          walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
          if (!info->modified)
            {
              info->modified = modified;
              return NULL_TREE;
            }
          info->modified = modified;
          repl = *tp;
        }
      else
        return NULL_TREE;
    }

  if (tp != orig_tp)
    {
      repl = build_fold_addr_expr (repl);
      gimple stmt
        = gimple_build_assign (make_ssa_name (TREE_TYPE (repl), NULL), repl);
      repl = gimple_assign_lhs (stmt);
      gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;
  info->modified = true;
  return NULL_TREE;
}

/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.

   This function will replace all function argument uses with their
   corresponding simd array elements, and adjust the return values
   accordingly.  */
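
/* E.g. the body of

     int foo (int x) { return x + 1; }

   is conceptually rewritten here so that "x" is read from
   x.simdarray[iter] and "return x + 1" becomes
   "retval_array[iter] = x + 1".  */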

static void
ipa_simd_modify_function_body (struct cgraph_node *node,
                               ipa_parm_adjustment_vec adjustments,
                               tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j, l;

  /* Re-use the adjustments array, but this time use it to replace
     every function argument use with an offset into the corresponding
     simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg)
        continue;

      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      adjustments[j].new_decl
        = build4 (ARRAY_REF,
                  basetype,
                  node->simdclone->args[i].simd_array,
                  iter,
                  NULL_TREE, NULL_TREE);
      if (adjustments[j].op == IPA_PARM_OP_NONE
          && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
        j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
    }

  l = adjustments.length ();
  for (i = 1; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (name
          && SSA_NAME_VAR (name)
          && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
        {
          for (j = 0; j < l; j++)
            if (SSA_NAME_VAR (name) == adjustments[j].base
                && adjustments[j].new_decl)
              {
                tree base_var;
                if (adjustments[j].new_ssa_base == NULL_TREE)
                  {
                    base_var
                      = copy_var_decl (adjustments[j].base,
                                       DECL_NAME (adjustments[j].base),
                                       TREE_TYPE (adjustments[j].base));
                    adjustments[j].new_ssa_base = base_var;
                  }
                else
                  base_var = adjustments[j].new_ssa_base;
                if (SSA_NAME_IS_DEFAULT_DEF (name))
                  {
                    bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
                    gimple_stmt_iterator gsi = gsi_after_labels (bb);
                    tree new_decl = unshare_expr (adjustments[j].new_decl);
                    set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
                    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
                    SSA_NAME_IS_DEFAULT_DEF (name) = 0;
                    gimple stmt = gimple_build_assign (name, new_decl);
                    gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
                  }
                else
                  SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
              }
        }
    }

  struct modify_stmt_info info;
  info.adjustments = adjustments;

  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;

      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          info.stmt = stmt;
          struct walk_stmt_info wi;
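          /* walk_gimple_op passes WI.pset to walk_tree and consults
             several of its other fields, so WI must start out fully
             zeroed for every statement walked.  */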
          memset (&wi, 0, sizeof (wi));
          info.modified = false;
          wi.info = &info;
          walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);

          if (gimple_code (stmt) == GIMPLE_RETURN)
            {
              tree retval = gimple_return_retval (stmt);
              if (!retval)
                {
                  gsi_remove (&gsi, true);
                  continue;
                }

              /* Replace `return foo' with `retval_array[iter] = foo'.  */
              tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
                                 retval_array, iter, NULL, NULL);
              stmt = gimple_build_assign (ref, retval);
              gsi_replace (&gsi, stmt, true);
              info.modified = true;
            }

          if (info.modified)
            {
              update_stmt (stmt);
              if (maybe_clean_eh_stmt (stmt))
                gimple_purge_dead_eh_edges (gimple_bb (stmt));
            }
          gsi_next (&gsi);
        }
    }
}

/* Adjust the argument types in NODE to their appropriate vector
   counterparts, and rewrite the body of a definition into a loop
   iterating over the simd lanes.  */
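
/* Conceptually, for a definition the result is

     void foo.simdclone (vector args...)
     {
       <copy the vector arguments into their simd arrays>
       for (iter = 0; iter < simdlen; iter++)
         <original body, with argument uses and returns rewritten>
       return <vector assembled from the return-value array>;
     }

   except that the loop is built directly on the CFG below.  */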

static void
simd_clone_adjust (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  targetm.simd_clone.adjust (node);

  tree retval = simd_clone_adjust_return_type (node);
  ipa_parm_adjustment_vec adjustments
    = simd_clone_adjust_argument_types (node);

  push_gimplify_context ();

  gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);

  /* Adjust all uses of vector arguments and all return values to
     reference the simd arrays, indexed by ITER1.  */
  tree iter = create_tmp_var (unsigned_type_node, "iter");
  tree iter1 = make_ssa_name (iter, NULL);
  tree iter2 = make_ssa_name (iter, NULL);
  ipa_simd_modify_function_body (node, adjustments, retval, iter1);

  /* Initialize the iteration variable.  */
  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  basic_block body_bb = split_block_after_labels (entry_bb)->dest;
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  /* Insert the SIMD array and iv initialization at function
     entry.  */
  gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);

  pop_gimplify_context (NULL);

  /* Create a new BB right before the original exit BB, to hold the
     iteration increment and the condition/branch.  */
  basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
  basic_block incr_bb = create_empty_bb (orig_exit);
  /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty
     flag.  Set it now to be a FALLTHRU_EDGE.  */
  gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
  EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
  for (unsigned i = 0;
       i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
    {
      edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
      redirect_edge_succ (e, incr_bb);
    }
  edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
  e->probability = REG_BR_PROB_BASE;
  gsi = gsi_last_bb (incr_bb);
  gimple g = gimple_build_assign_with_ops (PLUS_EXPR, iter2, iter1,
                                           build_int_cst (unsigned_type_node,
                                                          1));
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Mostly annotate the loop for the vectorizer (the rest is done
     below).  */
  struct loop *loop = alloc_loop ();
  cfun->has_force_vectorize_loops = true;
  loop->safelen = node->simdclone->simdlen;
  loop->force_vectorize = true;
  loop->header = body_bb;
  add_bb_to_loop (incr_bb, loop);
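  /* safelen == simdlen tells the vectorizer that no loop-carried
     dependence occurs across fewer than simdlen iterations, and
     force_vectorize makes it treat this loop like one annotated with
     "#pragma omp simd".  */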

  /* Branch around the body if the mask applies.  */
  if (node->simdclone->inbranch)
    {
      gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
      tree mask_array
        = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
      tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)), NULL);
      tree aref = build4 (ARRAY_REF,
                          TREE_TYPE (TREE_TYPE (mask_array)),
                          mask_array, iter1,
                          NULL, NULL);
      g = gimple_build_assign (mask, aref);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
      if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
        {
          aref = build1 (VIEW_CONVERT_EXPR,
                         build_nonstandard_integer_type (bitsize, 0), mask);
          mask = make_ssa_name (TREE_TYPE (aref), NULL);
          g = gimple_build_assign (mask, aref);
          gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
        }

      g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
                             NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
      FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
    }

  /* Generate the loop exit condition.  */
  g = gimple_build_cond (LT_EXPR,
                         iter2,
                         build_int_cst (unsigned_type_node,
                                        node->simdclone->simdlen),
                         NULL, NULL);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
  e = split_block (incr_bb, gsi_stmt (gsi));
  basic_block latch_bb = e->dest;
  basic_block new_exit_bb = split_block (latch_bb, NULL)->dest;
  loop->latch = latch_bb;

  redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);

  make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
  /* The successor of incr_bb is already pointing to latch_bb; just
     change the flags.
     make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE);  */
  FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;

  gimple phi = create_phi_node (iter1, body_bb);
  edge preheader_edge = find_edge (entry_bb, body_bb);
  edge latch_edge = single_succ_edge (latch_bb);
  add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
               UNKNOWN_LOCATION);
  add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
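
  /* At this point the CFG is roughly

       entry_bb:    simd array and iteration variable initialization
       body_bb:     iter1 = PHI <0 (entry), iter2 (latch)>
                    <rewritten original body>
       incr_bb:     iter2 = iter1 + 1;  if (iter2 < simdlen)
       latch_bb:    fallthru back to body_bb        (TRUE edge)
       new_exit_bb: the single return, built below  (FALSE edge)  */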

  /* Generate the new return.  */
  gsi = gsi_last_bb (new_exit_bb);
  if (retval
      && TREE_CODE (retval) == VIEW_CONVERT_EXPR
      && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
    retval = TREE_OPERAND (retval, 0);
  else if (retval)
    {
      retval = build1 (VIEW_CONVERT_EXPR,
                       TREE_TYPE (TREE_TYPE (node->decl)),
                       retval);
      retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
                                         false, GSI_CONTINUE_LINKING);
    }
  g = gimple_build_return (retval);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Handle aligned clauses by replacing default defs of the aligned
     uniform args with the lhs of a call to
     __builtin_assume_aligned (arg_N(D), alignment).  Handle linear
     clauses by adding PHIs.  */
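  /* E.g. for

       #pragma omp declare simd uniform(a) aligned(a:32) linear(i)
       int foo (int *a, int i);

     uses of the uniform A are redirected to the result of
     a.1 = __builtin_assume_aligned (a(D), 32) inserted at function
     entry, while the linear I gets a PHI so each lane sees
     i, i + 1, i + 2, ... alongside the main iteration variable.  */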
  for (unsigned i = 0; i < node->simdclone->nargs; i++)
    if (node->simdclone->args[i].alignment
        && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
        && (node->simdclone->args[i].alignment
            & (node->simdclone->args[i].alignment - 1)) == 0
        && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
           == POINTER_TYPE)
      {
        unsigned int alignment = node->simdclone->args[i].alignment;
        tree orig_arg = node->simdclone->args[i].orig_arg;
        tree def = ssa_default_def (cfun, orig_arg);
        if (def && !has_zero_uses (def))
          {
            tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
            gimple_seq seq = NULL;
            bool need_cvt = false;
            gimple call
              = gimple_build_call (fn, 2, def, size_int (alignment));
            g = call;
            if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
                                            ptr_type_node))
              need_cvt = true;
            tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg, NULL);
            gimple_call_set_lhs (g, t);
            gimple_seq_add_stmt_without_update (&seq, g);
            if (need_cvt)
              {
                t = make_ssa_name (orig_arg, NULL);
                g = gimple_build_assign_with_ops (NOP_EXPR, t,
                                                  gimple_call_lhs (g),
                                                  NULL_TREE);
                gimple_seq_add_stmt_without_update (&seq, g);
              }
            gsi_insert_seq_on_edge_immediate
              (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);

            entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
            int freq = compute_call_stmt_bb_frequency (current_function_decl,
                                                       entry_bb);
            cgraph_create_edge (node, cgraph_get_create_node (fn),
                                call, entry_bb->count, freq);

            imm_use_iterator iter;
            use_operand_p use_p;
            gimple use_stmt;
            tree repl = gimple_get_lhs (g);
            FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
              if (is_gimple_debug (use_stmt) || use_stmt == call)
                continue;
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, repl);
          }
      }
    else if (node->simdclone->args[i].arg_type
             == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
      {
        tree orig_arg = node->simdclone->args[i].orig_arg;
        tree def = ssa_default_def (cfun, orig_arg);
        gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
                    || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
        if (def && !has_zero_uses (def))
          {
            iter1 = make_ssa_name (orig_arg, NULL);
            iter2 = make_ssa_name (orig_arg, NULL);
            phi = create_phi_node (iter1, body_bb);
            add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
            add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
            enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
                                  ? PLUS_EXPR : POINTER_PLUS_EXPR;
            tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
                           ? TREE_TYPE (orig_arg) : sizetype;
            tree addcst
              = build_int_cst (addtype, node->simdclone->args[i].linear_step);
            g = gimple_build_assign_with_ops (code, iter2, iter1, addcst);
            gsi = gsi_last_bb (incr_bb);
            gsi_insert_before (&gsi, g, GSI_SAME_STMT);

            imm_use_iterator iter;
            use_operand_p use_p;
            gimple use_stmt;
            FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
              if (use_stmt == phi)
                continue;
              else
                FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                  SET_USE (use_p, iter1);
          }
      }

  calculate_dominance_info (CDI_DOMINATORS);
  add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);

  pop_cfun ();
}

/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  */
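
/* E.g.

     #pragma omp declare simd notinbranch
     int foo (int x);

   requests clones of foo that process several values of X per call;
   one clone (or an inbranch/notinbranch pair, when neither clause is
   given) is created for each ISA variant the target asks for.  */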

static void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
                                DECL_ATTRIBUTES (node->decl));
  if (attr == NULL_TREE
      || node->global.inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;

  /* Ignore
       #pragma omp declare simd
       extern int foo ();
     in C, where we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
        = simd_clone_clauses_extract (node, TREE_VALUE (attr),
                                      &inbranch_clause_specified);
      if (clone_info == NULL)
        continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
         1 (just one ISA of simd clones should be created) or a higher
         count of ISA variants.  In the latter case, clone_info is
         initialized for the first ISA variant.  */
      int count
        = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
                                                          base_type, 0);
      if (count == 0)
        continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
         also create one inbranch and one !inbranch clone of it.  */
      for (int i = 0; i < count * 2; i++)
        {
          struct cgraph_simd_clone *clone = clone_info;
          if (inbranch_clause_specified && (i & 1) != 0)
            continue;

          if (i != 0)
            {
              clone = simd_clone_struct_alloc (clone_info->nargs
                                               + ((i & 1) != 0));
              simd_clone_struct_copy (clone, clone_info);
              /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
                 and simd_clone_adjust_argument_types did to the first
                 clone's info.  */
              clone->nargs -= clone_info->inbranch;
              clone->simdlen = orig_simdlen;
              /* And call the target hook again to get the right ISA.  */
              targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
                                                              base_type,
                                                              i / 2);
              if ((i & 1) != 0)
                clone->inbranch = 1;
            }

          /* simd_clone_mangle might fail if such a clone has been created
             already.  */
          tree id = simd_clone_mangle (node, clone);
          if (id == NULL_TREE)
            continue;

          /* Only when we are sure we want to create the clone actually
             clone the function (or definitions) or create another
             extern FUNCTION_DECL (for prototypes without definitions).  */
          struct cgraph_node *n = simd_clone_create (node);
          if (n == NULL)
            continue;

          n->simdclone = clone;
          clone->origin = node;
          clone->next_clone = NULL;
          if (node->simd_clones == NULL)
            {
              clone->prev_clone = n;
              node->simd_clones = n;
            }
          else
            {
              clone->prev_clone = node->simd_clones->simdclone->prev_clone;
              clone->prev_clone->simdclone->next_clone = n;
              node->simd_clones->simdclone->prev_clone = n;
            }
          change_decl_assembler_name (n->decl, id);
          /* And finally adjust the return type and the parameters, and
             for definitions also the function body.  */
          if (node->definition)
            simd_clone_adjust (n);
          else
            {
              simd_clone_adjust_return_type (n);
              simd_clone_adjust_argument_types (n);
            }
        }
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}

/* Entry point for IPA simd clone creation pass.  */

static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}

namespace {

const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS,              /* type */
  "simdclone",                  /* name */
  OPTGROUP_NONE,                /* optinfo_flags */
  true,                         /* has_execute */
  TV_NONE,                      /* tv_id */
  ( PROP_ssa | PROP_cfg ),      /* properties_required */
  0,                            /* properties_provided */
  0,                            /* properties_destroyed */
  0,                            /* todo_flags_start */
  0,                            /* todo_flags_finish */
};

class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

bool
pass_omp_simd_clone::gate (function *)
{
  return ((flag_openmp || flag_openmp_simd
           || flag_cilkplus
           || (in_lto_p && !flag_wpa))
          && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
}

} // anon namespace

simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}

#include "gt-omp-low.h"