/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "stringpool.h"
30 #include "stor-layout.h"
31 #include "rtl.h"
32 #include "basic-block.h"
33 #include "tree-ssa-alias.h"
34 #include "internal-fn.h"
35 #include "gimple-fold.h"
36 #include "gimple-expr.h"
37 #include "is-a.h"
38 #include "gimple.h"
39 #include "gimplify.h"
40 #include "gimple-iterator.h"
41 #include "gimplify-me.h"
42 #include "gimple-walk.h"
43 #include "tree-iterator.h"
44 #include "tree-inline.h"
45 #include "langhooks.h"
46 #include "diagnostic-core.h"
47 #include "gimple-ssa.h"
48 #include "cgraph.h"
49 #include "tree-cfg.h"
50 #include "tree-phinodes.h"
51 #include "ssa-iterators.h"
52 #include "tree-ssanames.h"
53 #include "tree-into-ssa.h"
54 #include "expr.h"
55 #include "tree-dfa.h"
56 #include "tree-ssa.h"
57 #include "flags.h"
58 #include "function.h"
59 #include "expr.h"
60 #include "tree-pass.h"
61 #include "except.h"
62 #include "splay-tree.h"
63 #include "optabs.h"
64 #include "cfgloop.h"
65 #include "target.h"
66 #include "omp-low.h"
67 #include "gimple-low.h"
68 #include "tree-cfgcleanup.h"
69 #include "pretty-print.h"
70 #include "ipa-prop.h"
71 #include "tree-nested.h"
72 #include "tree-eh.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

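/* As an illustrative sketch (not the exact generated code), a construct
   such as

	#pragma omp parallel shared (x)
	  body;

   is outlined into a child function and replaced by a libgomp call
   along the lines of

	static void foo._omp_fn.0 (struct .omp_data_s *ptr) { ...body... }
	...
	.omp_data_o.x = x;
	GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   where .omp_data_s/.omp_data_o marshal the variables named in the
   data sharing clauses.  */
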
/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump to during omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

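/* For example, determine_parallel_type below uses it as

	tree c = find_omp_clause (gimple_omp_for_clauses (ws_stmt),
				  OMP_CLAUSE_SCHEDULE);

   and treats a NULL_TREE result as "clause not present".  */
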
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD);
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

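  /* Illustrative worked example of the COUNT computation above: for
     "for (i = 0; i < 10; i += 3)" with cond_code LT_EXPR the folded
     expression is (step + -1 + n2 - n1) / step = (3 - 1 + 10 - 0) / 3
     = 4, matching the iterations i = 0, 3, 6, 9.  */
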
  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (ws_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (ws_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}

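/* Note: for a combined parallel loop the extra arguments collected
   above are appended to the outlined-function call so that libgomp
   entry points such as GOMP_parallel_loop_dynamic receive the start,
   end, increment and (optionally) chunk size directly; see the
   expansion done by pass_expand_omp.  */
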
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

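/* For illustration: a local "int i" shared within a parallel that is
   never address-taken gets copy-in/copy-out (false above), while an
   array "int a[10]" is an aggregate and is always passed by pointer
   (true), as is any TREE_ADDRESSABLE variable.  */
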
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
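/* (Inferred from the uses below.)  MASK is a bit mask: bit 0 installs
   the field into CTX->RECORD_TYPE / CTX->FIELD_MAP, bit 1 into
   CTX->SRECORD_TYPE / CTX->SFIELD_MAP, and bit 2 requests a
   pointer-to-pointer field for an array section.  */
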
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

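/* For a parallel+for nest the dump looks roughly like this (basic
   block numbers are illustrative only):

	bb 2: gimple_omp_parallel
	    bb 3: gimple_omp_for
	    bb 5: GIMPLE_OMP_CONTINUE
	    bb 6: GIMPLE_OMP_RETURN
	bb 7: GIMPLE_OMP_RETURN  */
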
DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node::add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  if (ctx->outer)
		    {
		      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		      decl = OMP_CLAUSE_DECL (c);
		    }
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}

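/* For example (illustrative): given "#pragma omp parallel shared (n)"
   where N is a non-addressable local int, the first loop above adds an
   "n" field to .omp_data_s via install_var_field and a local copy via
   install_var_local; were N addressable, use_pointer_for_field would
   make the field a pointer to N instead.  */
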
/* Create a new name for omp child function.  Returns an identifier.  */

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

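/* The resulting names look like "foo._omp_fn.0" or "foo._omp_cpyfn.1",
   numbered by clone_function_name.  */
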
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  bool target_p = false;
  if (lookup_attribute ("omp declare target",
			DECL_ATTRIBUTES (current_function_decl)))
    target_p = true;
  else
    {
      omp_context *octx;
      for (octx = ctx; octx; octx = octx->outer)
	if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
	    && gimple_omp_target_kind (octx->stmt)
	       == GF_OMP_TARGET_KIND_REGION)
	  {
	    target_p = true;
	    break;
	  }
    }
  if (target_p)
    DECL_ATTRIBUTES (decl)
      = tree_cons (get_identifier ("omp declare target"),
		   NULL_TREE, DECL_ATTRIBUTES (decl));

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}

/* Callback for walk_gimple_seq.  Check if combined parallel
   contains gimple_omp_for_combined_into_p OMP_FOR.  */

static tree
find_combined_for (gimple_stmt_iterator *gsi_p,
		   bool *handled_ops_p,
		   struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_combined_into_p (stmt)
	  && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
	{
	  wi->info = stmt;
	  return integer_zero_node;
	}
      break;
    default:
      break;
    }
  return NULL;
}

1969 /* Scan an OpenMP parallel directive. */
1971 static void
1972 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1974 omp_context *ctx;
1975 tree name;
1976 gimple stmt = gsi_stmt (*gsi);
1978 /* Ignore parallel directives with empty bodies, unless there
1979 are copyin clauses. */
1980 if (optimize > 0
1981 && empty_body_p (gimple_omp_body (stmt))
1982 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1983 OMP_CLAUSE_COPYIN) == NULL)
1985 gsi_replace (gsi, gimple_build_nop (), false);
1986 return;
1989 if (gimple_omp_parallel_combined_p (stmt))
1991 gimple for_stmt;
1992 struct walk_stmt_info wi;
1994 memset (&wi, 0, sizeof (wi));
1995 wi.val_only = true;
1996 walk_gimple_seq (gimple_omp_body (stmt),
1997 find_combined_for, NULL, &wi);
1998 for_stmt = (gimple) wi.info;
1999 if (for_stmt)
2001 struct omp_for_data fd;
2002 extract_omp_for_data (for_stmt, &fd, NULL);
2003 /* We need two temporaries with fd.iter_type (istart/iend)
2004 and then (fd.collapse - 1) temporaries with the same
2005 type for the count2 ... countN-1 vars if they are not constant. */
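/* E.g. a combined parallel loop with collapse(2) and a non-constant outer
   bound needs three such temporaries: istart, iend and one count var. */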
2006 size_t count = 2, i;
2007 tree type = fd.iter_type;
2008 if (fd.collapse > 1
2009 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2010 count += fd.collapse - 1;
2011 for (i = 0; i < count; i++)
2013 tree temp = create_tmp_var (type, NULL);
2014 tree c = build_omp_clause (UNKNOWN_LOCATION,
2015 OMP_CLAUSE__LOOPTEMP_);
2016 insert_decl_map (&outer_ctx->cb, temp, temp);
2017 OMP_CLAUSE_DECL (c) = temp;
2018 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2019 gimple_omp_parallel_set_clauses (stmt, c);
2024 ctx = new_omp_context (stmt, outer_ctx);
2025 if (taskreg_nesting_level > 1)
2026 ctx->is_nested = true;
2027 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2028 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2029 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2030 name = create_tmp_var_name (".omp_data_s");
2031 name = build_decl (gimple_location (stmt),
2032 TYPE_DECL, name, ctx->record_type);
2033 DECL_ARTIFICIAL (name) = 1;
2034 DECL_NAMELESS (name) = 1;
2035 TYPE_NAME (ctx->record_type) = name;
2036 create_omp_child_function (ctx, false);
2037 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2039 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2040 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2042 if (TYPE_FIELDS (ctx->record_type) == NULL)
2043 ctx->record_type = ctx->receiver_decl = NULL;
2044 else
2046 layout_type (ctx->record_type);
2047 fixup_child_record_type (ctx);
2051 /* Scan an OpenMP task directive. */
2053 static void
2054 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2056 omp_context *ctx;
2057 tree name, t;
2058 gimple stmt = gsi_stmt (*gsi);
2059 location_t loc = gimple_location (stmt);
2061 /* Ignore task directives with empty bodies. */
2062 if (optimize > 0
2063 && empty_body_p (gimple_omp_body (stmt)))
2065 gsi_replace (gsi, gimple_build_nop (), false);
2066 return;
2069 ctx = new_omp_context (stmt, outer_ctx);
2070 if (taskreg_nesting_level > 1)
2071 ctx->is_nested = true;
2072 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2073 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2074 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2075 name = create_tmp_var_name (".omp_data_s");
2076 name = build_decl (gimple_location (stmt),
2077 TYPE_DECL, name, ctx->record_type);
2078 DECL_ARTIFICIAL (name) = 1;
2079 DECL_NAMELESS (name) = 1;
2080 TYPE_NAME (ctx->record_type) = name;
2081 create_omp_child_function (ctx, false);
2082 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2084 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2086 if (ctx->srecord_type)
2088 name = create_tmp_var_name (".omp_data_a");
2089 name = build_decl (gimple_location (stmt),
2090 TYPE_DECL, name, ctx->srecord_type);
2091 DECL_ARTIFICIAL (name) = 1;
2092 DECL_NAMELESS (name) = 1;
2093 TYPE_NAME (ctx->srecord_type) = name;
2094 create_omp_child_function (ctx, true);
2097 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2099 if (TYPE_FIELDS (ctx->record_type) == NULL)
2101 ctx->record_type = ctx->receiver_decl = NULL;
2102 t = build_int_cst (long_integer_type_node, 0);
2103 gimple_omp_task_set_arg_size (stmt, t);
2104 t = build_int_cst (long_integer_type_node, 1);
2105 gimple_omp_task_set_arg_align (stmt, t);
2107 else
2109 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2110 /* Move VLA fields to the end. */
2111 p = &TYPE_FIELDS (ctx->record_type);
2112 while (*p)
2113 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2114 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2116 *q = *p;
2117 *p = TREE_CHAIN (*p);
2118 TREE_CHAIN (*q) = NULL_TREE;
2119 q = &TREE_CHAIN (*q);
2121 else
2122 p = &DECL_CHAIN (*p);
2123 *p = vla_fields;
2124 layout_type (ctx->record_type);
2125 fixup_child_record_type (ctx);
2126 if (ctx->srecord_type)
2127 layout_type (ctx->srecord_type);
2128 t = fold_convert_loc (loc, long_integer_type_node,
2129 TYPE_SIZE_UNIT (ctx->record_type));
2130 gimple_omp_task_set_arg_size (stmt, t);
2131 t = build_int_cst (long_integer_type_node,
2132 TYPE_ALIGN_UNIT (ctx->record_type));
2133 gimple_omp_task_set_arg_align (stmt, t);
2138 /* Scan an OpenMP loop directive. */
2140 static void
2141 scan_omp_for (gimple stmt, omp_context *outer_ctx)
2143 omp_context *ctx;
2144 size_t i;
2146 ctx = new_omp_context (stmt, outer_ctx);
2148 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2150 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2151 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2153 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2154 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2155 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2156 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2158 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2161 /* Scan an OpenMP sections directive. */
2163 static void
2164 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
2166 omp_context *ctx;
2168 ctx = new_omp_context (stmt, outer_ctx);
2169 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2170 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2173 /* Scan an OpenMP single directive. */
2175 static void
2176 scan_omp_single (gimple stmt, omp_context *outer_ctx)
2178 omp_context *ctx;
2179 tree name;
2181 ctx = new_omp_context (stmt, outer_ctx);
2182 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2183 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2184 name = create_tmp_var_name (".omp_copy_s");
2185 name = build_decl (gimple_location (stmt),
2186 TYPE_DECL, name, ctx->record_type);
2187 TYPE_NAME (ctx->record_type) = name;
2189 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2190 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2192 if (TYPE_FIELDS (ctx->record_type) == NULL)
2193 ctx->record_type = NULL;
2194 else
2195 layout_type (ctx->record_type);
2198 /* Scan an OpenMP target{, data, update} directive. */
2200 static void
2201 scan_omp_target (gimple stmt, omp_context *outer_ctx)
2203 omp_context *ctx;
2204 tree name;
2205 int kind = gimple_omp_target_kind (stmt);
2207 ctx = new_omp_context (stmt, outer_ctx);
2208 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2209 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2210 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2211 name = create_tmp_var_name (".omp_data_t");
2212 name = build_decl (gimple_location (stmt),
2213 TYPE_DECL, name, ctx->record_type);
2214 DECL_ARTIFICIAL (name) = 1;
2215 DECL_NAMELESS (name) = 1;
2216 TYPE_NAME (ctx->record_type) = name;
2217 if (kind == GF_OMP_TARGET_KIND_REGION)
2219 create_omp_child_function (ctx, false);
2220 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2223 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2224 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2226 if (TYPE_FIELDS (ctx->record_type) == NULL)
2227 ctx->record_type = ctx->receiver_decl = NULL;
2228 else
2230 TYPE_FIELDS (ctx->record_type)
2231 = nreverse (TYPE_FIELDS (ctx->record_type));
2232 #ifdef ENABLE_CHECKING
2233 tree field;
2234 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2235 for (field = TYPE_FIELDS (ctx->record_type);
2236 field;
2237 field = DECL_CHAIN (field))
2238 gcc_assert (DECL_ALIGN (field) == align);
2239 #endif
2240 layout_type (ctx->record_type);
2241 if (kind == GF_OMP_TARGET_KIND_REGION)
2242 fixup_child_record_type (ctx);
2246 /* Scan an OpenMP teams directive. */
2248 static void
2249 scan_omp_teams (gimple stmt, omp_context *outer_ctx)
2251 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2252 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2253 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2256 /* Check OpenMP nesting restrictions. */
2257 static bool
2258 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2260 if (ctx != NULL)
2262 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2263 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2265 error_at (gimple_location (stmt),
2266 "OpenMP constructs may not be nested inside simd region");
2267 return false;
2269 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2271 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2272 || (gimple_omp_for_kind (stmt)
2273 != GF_OMP_FOR_KIND_DISTRIBUTE))
2274 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2276 error_at (gimple_location (stmt),
2277 "only distribute or parallel constructs are allowed to "
2278 "be closely nested inside teams construct");
2279 return false;
2283 switch (gimple_code (stmt))
2285 case GIMPLE_OMP_FOR:
2286 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2287 return true;
2288 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2290 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2292 error_at (gimple_location (stmt),
2293 "distribute construct must be closely nested inside "
2294 "teams construct");
2295 return false;
2297 return true;
2299 /* FALLTHRU */
2300 case GIMPLE_CALL:
2301 if (is_gimple_call (stmt)
2302 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2303 == BUILT_IN_GOMP_CANCEL
2304 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2305 == BUILT_IN_GOMP_CANCELLATION_POINT))
2307 const char *bad = NULL;
2308 const char *kind = NULL;
2309 if (ctx == NULL)
2311 error_at (gimple_location (stmt), "orphaned %qs construct",
2312 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2313 == BUILT_IN_GOMP_CANCEL
2314 ? "#pragma omp cancel"
2315 : "#pragma omp cancellation point");
2316 return false;
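/* The first argument of GOMP_cancel/GOMP_cancellation_point encodes which
   construct is being cancelled: 1 parallel, 2 for, 4 sections, 8 taskgroup,
   matching the case labels below. */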
2318 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2319 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2320 : 0)
2322 case 1:
2323 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2324 bad = "#pragma omp parallel";
2325 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2326 == BUILT_IN_GOMP_CANCEL
2327 && !integer_zerop (gimple_call_arg (stmt, 1)))
2328 ctx->cancellable = true;
2329 kind = "parallel";
2330 break;
2331 case 2:
2332 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2333 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2334 bad = "#pragma omp for";
2335 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2336 == BUILT_IN_GOMP_CANCEL
2337 && !integer_zerop (gimple_call_arg (stmt, 1)))
2339 ctx->cancellable = true;
2340 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2341 OMP_CLAUSE_NOWAIT))
2342 warning_at (gimple_location (stmt), 0,
2343 "%<#pragma omp cancel for%> inside "
2344 "%<nowait%> for construct");
2345 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2346 OMP_CLAUSE_ORDERED))
2347 warning_at (gimple_location (stmt), 0,
2348 "%<#pragma omp cancel for%> inside "
2349 "%<ordered%> for construct");
2351 kind = "for";
2352 break;
2353 case 4:
2354 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2355 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2356 bad = "#pragma omp sections";
2357 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2358 == BUILT_IN_GOMP_CANCEL
2359 && !integer_zerop (gimple_call_arg (stmt, 1)))
2361 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2363 ctx->cancellable = true;
2364 if (find_omp_clause (gimple_omp_sections_clauses
2365 (ctx->stmt),
2366 OMP_CLAUSE_NOWAIT))
2367 warning_at (gimple_location (stmt), 0,
2368 "%<#pragma omp cancel sections%> inside "
2369 "%<nowait%> sections construct");
2371 else
2373 gcc_assert (ctx->outer
2374 && gimple_code (ctx->outer->stmt)
2375 == GIMPLE_OMP_SECTIONS);
2376 ctx->outer->cancellable = true;
2377 if (find_omp_clause (gimple_omp_sections_clauses
2378 (ctx->outer->stmt),
2379 OMP_CLAUSE_NOWAIT))
2380 warning_at (gimple_location (stmt), 0,
2381 "%<#pragma omp cancel sections%> inside "
2382 "%<nowait%> sections construct");
2385 kind = "sections";
2386 break;
2387 case 8:
2388 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2389 bad = "#pragma omp task";
2390 else
2391 ctx->cancellable = true;
2392 kind = "taskgroup";
2393 break;
2394 default:
2395 error_at (gimple_location (stmt), "invalid arguments");
2396 return false;
2398 if (bad)
2400 error_at (gimple_location (stmt),
2401 "%<%s %s%> construct not closely nested inside of %qs",
2402 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2403 == BUILT_IN_GOMP_CANCEL
2404 ? "#pragma omp cancel"
2405 : "#pragma omp cancellation point", kind, bad);
2406 return false;
2409 /* FALLTHRU */
2410 case GIMPLE_OMP_SECTIONS:
2411 case GIMPLE_OMP_SINGLE:
2412 for (; ctx != NULL; ctx = ctx->outer)
2413 switch (gimple_code (ctx->stmt))
2415 case GIMPLE_OMP_FOR:
2416 case GIMPLE_OMP_SECTIONS:
2417 case GIMPLE_OMP_SINGLE:
2418 case GIMPLE_OMP_ORDERED:
2419 case GIMPLE_OMP_MASTER:
2420 case GIMPLE_OMP_TASK:
2421 case GIMPLE_OMP_CRITICAL:
2422 if (is_gimple_call (stmt))
2424 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2425 != BUILT_IN_GOMP_BARRIER)
2426 return true;
2427 error_at (gimple_location (stmt),
2428 "barrier region may not be closely nested inside "
2429 "of work-sharing, critical, ordered, master or "
2430 "explicit task region");
2431 return false;
2433 error_at (gimple_location (stmt),
2434 "work-sharing region may not be closely nested inside "
2435 "of work-sharing, critical, ordered, master or explicit "
2436 "task region");
2437 return false;
2438 case GIMPLE_OMP_PARALLEL:
2439 return true;
2440 default:
2441 break;
2443 break;
2444 case GIMPLE_OMP_MASTER:
2445 for (; ctx != NULL; ctx = ctx->outer)
2446 switch (gimple_code (ctx->stmt))
2448 case GIMPLE_OMP_FOR:
2449 case GIMPLE_OMP_SECTIONS:
2450 case GIMPLE_OMP_SINGLE:
2451 case GIMPLE_OMP_TASK:
2452 error_at (gimple_location (stmt),
2453 "master region may not be closely nested inside "
2454 "of work-sharing or explicit task region");
2455 return false;
2456 case GIMPLE_OMP_PARALLEL:
2457 return true;
2458 default:
2459 break;
2461 break;
2462 case GIMPLE_OMP_ORDERED:
2463 for (; ctx != NULL; ctx = ctx->outer)
2464 switch (gimple_code (ctx->stmt))
2466 case GIMPLE_OMP_CRITICAL:
2467 case GIMPLE_OMP_TASK:
2468 error_at (gimple_location (stmt),
2469 "ordered region may not be closely nested inside "
2470 "of critical or explicit task region");
2471 return false;
2472 case GIMPLE_OMP_FOR:
2473 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2474 OMP_CLAUSE_ORDERED) == NULL)
2476 error_at (gimple_location (stmt),
2477 "ordered region must be closely nested inside "
2478 "a loop region with an ordered clause");
2479 return false;
2481 return true;
2482 case GIMPLE_OMP_PARALLEL:
2483 error_at (gimple_location (stmt),
2484 "ordered region must be closely nested inside "
2485 "a loop region with an ordered clause");
2486 return false;
2487 default:
2488 break;
2490 break;
2491 case GIMPLE_OMP_CRITICAL:
2492 for (; ctx != NULL; ctx = ctx->outer)
2493 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
2494 && (gimple_omp_critical_name (stmt)
2495 == gimple_omp_critical_name (ctx->stmt)))
2497 error_at (gimple_location (stmt),
2498 "critical region may not be nested inside a critical "
2499 "region with the same name");
2500 return false;
2502 break;
2503 case GIMPLE_OMP_TEAMS:
2504 if (ctx == NULL
2505 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2506 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2508 error_at (gimple_location (stmt),
2509 "teams construct not closely nested inside of target "
2510 "region");
2511 return false;
2513 break;
2514 case GIMPLE_OMP_TARGET:
2515 for (; ctx != NULL; ctx = ctx->outer)
2516 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
2517 && gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_REGION)
2519 const char *name;
2520 switch (gimple_omp_target_kind (stmt))
2522 case GF_OMP_TARGET_KIND_REGION: name = "target"; break;
2523 case GF_OMP_TARGET_KIND_DATA: name = "target data"; break;
2524 case GF_OMP_TARGET_KIND_UPDATE: name = "target update"; break;
2525 default: gcc_unreachable ();
2527 warning_at (gimple_location (stmt), 0,
2528 "%s construct inside of target region", name);
2530 break;
2531 default:
2532 break;
2534 return true;
2538 /* Helper function for scan_omp.
2540 Callback for walk_tree or operators in walk_gimple_stmt used to
2541 scan for OpenMP directives in TP. */
2543 static tree
2544 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2546 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2547 omp_context *ctx = (omp_context *) wi->info;
2548 tree t = *tp;
2550 switch (TREE_CODE (t))
2552 case VAR_DECL:
2553 case PARM_DECL:
2554 case LABEL_DECL:
2555 case RESULT_DECL:
2556 if (ctx)
2557 *tp = remap_decl (t, &ctx->cb);
2558 break;
2560 default:
2561 if (ctx && TYPE_P (t))
2562 *tp = remap_type (t, &ctx->cb);
2563 else if (!DECL_P (t))
2565 *walk_subtrees = 1;
2566 if (ctx)
2568 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2569 if (tem != TREE_TYPE (t))
2571 if (TREE_CODE (t) == INTEGER_CST)
2572 *tp = wide_int_to_tree (tem, t);
2573 else
2574 TREE_TYPE (t) = tem;
2578 break;
2581 return NULL_TREE;
2584 /* Return true if FNDECL is a setjmp or a longjmp. */
2586 static bool
2587 setjmp_or_longjmp_p (const_tree fndecl)
2589 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2590 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2591 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2592 return true;
2594 tree declname = DECL_NAME (fndecl);
2595 if (!declname)
2596 return false;
2597 const char *name = IDENTIFIER_POINTER (declname);
2598 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2602 /* Helper function for scan_omp.
2604 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2605 the current statement in GSI. */
2607 static tree
2608 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2609 struct walk_stmt_info *wi)
2611 gimple stmt = gsi_stmt (*gsi);
2612 omp_context *ctx = (omp_context *) wi->info;
2614 if (gimple_has_location (stmt))
2615 input_location = gimple_location (stmt);
2617 /* Check the OpenMP nesting restrictions. */
2618 bool remove = false;
2619 if (is_gimple_omp (stmt))
2620 remove = !check_omp_nesting_restrictions (stmt, ctx);
2621 else if (is_gimple_call (stmt))
2623 tree fndecl = gimple_call_fndecl (stmt);
2624 if (fndecl)
2626 if (setjmp_or_longjmp_p (fndecl)
2627 && ctx
2628 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2629 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2631 remove = true;
2632 error_at (gimple_location (stmt),
2633 "setjmp/longjmp inside simd construct");
2635 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2636 switch (DECL_FUNCTION_CODE (fndecl))
2638 case BUILT_IN_GOMP_BARRIER:
2639 case BUILT_IN_GOMP_CANCEL:
2640 case BUILT_IN_GOMP_CANCELLATION_POINT:
2641 case BUILT_IN_GOMP_TASKYIELD:
2642 case BUILT_IN_GOMP_TASKWAIT:
2643 case BUILT_IN_GOMP_TASKGROUP_START:
2644 case BUILT_IN_GOMP_TASKGROUP_END:
2645 remove = !check_omp_nesting_restrictions (stmt, ctx);
2646 break;
2647 default:
2648 break;
2652 if (remove)
2654 stmt = gimple_build_nop ();
2655 gsi_replace (gsi, stmt, false);
2658 *handled_ops_p = true;
2660 switch (gimple_code (stmt))
2662 case GIMPLE_OMP_PARALLEL:
2663 taskreg_nesting_level++;
2664 scan_omp_parallel (gsi, ctx);
2665 taskreg_nesting_level--;
2666 break;
2668 case GIMPLE_OMP_TASK:
2669 taskreg_nesting_level++;
2670 scan_omp_task (gsi, ctx);
2671 taskreg_nesting_level--;
2672 break;
2674 case GIMPLE_OMP_FOR:
2675 scan_omp_for (stmt, ctx);
2676 break;
2678 case GIMPLE_OMP_SECTIONS:
2679 scan_omp_sections (stmt, ctx);
2680 break;
2682 case GIMPLE_OMP_SINGLE:
2683 scan_omp_single (stmt, ctx);
2684 break;
2686 case GIMPLE_OMP_SECTION:
2687 case GIMPLE_OMP_MASTER:
2688 case GIMPLE_OMP_TASKGROUP:
2689 case GIMPLE_OMP_ORDERED:
2690 case GIMPLE_OMP_CRITICAL:
2691 ctx = new_omp_context (stmt, ctx);
2692 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2693 break;
2695 case GIMPLE_OMP_TARGET:
2696 scan_omp_target (stmt, ctx);
2697 break;
2699 case GIMPLE_OMP_TEAMS:
2700 scan_omp_teams (stmt, ctx);
2701 break;
2703 case GIMPLE_BIND:
2705 tree var;
2707 *handled_ops_p = false;
2708 if (ctx)
2709 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2710 insert_decl_map (&ctx->cb, var, var);
2712 break;
2713 default:
2714 *handled_ops_p = false;
2715 break;
2718 return NULL_TREE;
2722 /* Scan all the statements starting at the current statement. CTX
2723 contains context information about the OpenMP directives and
2724 clauses found during the scan. */
2726 static void
2727 scan_omp (gimple_seq *body_p, omp_context *ctx)
2729 location_t saved_location;
2730 struct walk_stmt_info wi;
2732 memset (&wi, 0, sizeof (wi));
2733 wi.info = ctx;
2734 wi.want_locations = true;
2736 saved_location = input_location;
2737 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2738 input_location = saved_location;
2741 /* Re-gimplification and code generation routines. */
2743 /* Build a call to GOMP_barrier. */
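/* If LHS is non-NULL, a cancellable GOMP_barrier_cancel call is built
   instead, with its boolean result (whether cancellation happened)
   stored in LHS. */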
2745 static gimple
2746 build_omp_barrier (tree lhs)
2748 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2749 : BUILT_IN_GOMP_BARRIER);
2750 gimple g = gimple_build_call (fndecl, 0);
2751 if (lhs)
2752 gimple_call_set_lhs (g, lhs);
2753 return g;
2756 /* If a context was created for STMT when it was scanned, return it. */
2758 static omp_context *
2759 maybe_lookup_ctx (gimple stmt)
2761 splay_tree_node n;
2762 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2763 return n ? (omp_context *) n->value : NULL;
2767 /* Find the mapping for DECL in CTX or the immediately enclosing
2768 context that has a mapping for DECL.
2770 If CTX is a nested parallel directive, we may have to use the decl
2771 mappings created in CTX's parent context. Suppose that we have the
2772 following parallel nesting (variable UIDs shown for clarity):
2774 iD.1562 = 0;
2775 #omp parallel shared(iD.1562) -> outer parallel
2776 iD.1562 = iD.1562 + 1;
2778 #omp parallel shared (iD.1562) -> inner parallel
2779 iD.1562 = iD.1562 - 1;
2781 Each parallel structure will create a distinct .omp_data_s structure
2782 for copying iD.1562 in/out of the directive:
2784 outer parallel .omp_data_s.1.i -> iD.1562
2785 inner parallel .omp_data_s.2.i -> iD.1562
2787 A shared variable mapping will produce a copy-out operation before
2788 the parallel directive and a copy-in operation after it. So, in
2789 this case we would have:
2791 iD.1562 = 0;
2792 .omp_data_o.1.i = iD.1562;
2793 #omp parallel shared(iD.1562) -> outer parallel
2794 .omp_data_i.1 = &.omp_data_o.1
2795 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2797 .omp_data_o.2.i = iD.1562; -> **
2798 #omp parallel shared(iD.1562) -> inner parallel
2799 .omp_data_i.2 = &.omp_data_o.2
2800 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2803 ** This is a problem. The symbol iD.1562 cannot be referenced
2804 inside the body of the outer parallel region. But since we are
2805 emitting this copy operation while expanding the inner parallel
2806 directive, we need to access the CTX structure of the outer
2807 parallel directive to get the correct mapping:
2809 .omp_data_o.2.i = .omp_data_i.1->i
2811 Since there may be other workshare or parallel directives enclosing
2812 the parallel directive, it may be necessary to walk up the context
2813 parent chain. This is not a problem in general because nested
2814 parallelism happens only rarely. */
2816 static tree
2817 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2819 tree t;
2820 omp_context *up;
2822 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2823 t = maybe_lookup_decl (decl, up);
2825 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2827 return t ? t : decl;
2831 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2832 in outer contexts. */
2834 static tree
2835 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2837 tree t = NULL;
2838 omp_context *up;
2840 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2841 t = maybe_lookup_decl (decl, up);
2843 return t ? t : decl;
2847 /* Construct the initialization value for reduction CLAUSE. */
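/* E.g. 0 for +, |, ^ and friends; 1 for *, && and ==; ~0 for &; and for
   max (resp. min) the most negative (resp. positive) value of TYPE, or
   -inf (resp. +inf) when infinities are honored. */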
2849 tree
2850 omp_reduction_init (tree clause, tree type)
2852 location_t loc = OMP_CLAUSE_LOCATION (clause);
2853 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2855 case PLUS_EXPR:
2856 case MINUS_EXPR:
2857 case BIT_IOR_EXPR:
2858 case BIT_XOR_EXPR:
2859 case TRUTH_OR_EXPR:
2860 case TRUTH_ORIF_EXPR:
2861 case TRUTH_XOR_EXPR:
2862 case NE_EXPR:
2863 return build_zero_cst (type);
2865 case MULT_EXPR:
2866 case TRUTH_AND_EXPR:
2867 case TRUTH_ANDIF_EXPR:
2868 case EQ_EXPR:
2869 return fold_convert_loc (loc, type, integer_one_node);
2871 case BIT_AND_EXPR:
2872 return fold_convert_loc (loc, type, integer_minus_one_node);
2874 case MAX_EXPR:
2875 if (SCALAR_FLOAT_TYPE_P (type))
2877 REAL_VALUE_TYPE max, min;
2878 if (HONOR_INFINITIES (TYPE_MODE (type)))
2880 real_inf (&max);
2881 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2883 else
2884 real_maxval (&min, 1, TYPE_MODE (type));
2885 return build_real (type, min);
2887 else
2889 gcc_assert (INTEGRAL_TYPE_P (type));
2890 return TYPE_MIN_VALUE (type);
2893 case MIN_EXPR:
2894 if (SCALAR_FLOAT_TYPE_P (type))
2896 REAL_VALUE_TYPE max;
2897 if (HONOR_INFINITIES (TYPE_MODE (type)))
2898 real_inf (&max);
2899 else
2900 real_maxval (&max, 0, TYPE_MODE (type));
2901 return build_real (type, max);
2903 else
2905 gcc_assert (INTEGRAL_TYPE_P (type));
2906 return TYPE_MAX_VALUE (type);
2909 default:
2910 gcc_unreachable ();
2914 /* Return the alignment to be assumed for the var in CLAUSE, which should
2915 be OMP_CLAUSE_ALIGNED. */
2917 static tree
2918 omp_clause_aligned_alignment (tree clause)
2920 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
2921 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
2923 /* Otherwise return the implementation-defined alignment. */
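/* E.g. on an x86-64 target with AVX enabled this would typically be 32,
   the unit alignment of the widest preferred vector mode (an illustrative
   value, not a guarantee). */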
2924 unsigned int al = 1;
2925 enum machine_mode mode, vmode;
2926 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2927 if (vs)
2928 vs = 1 << floor_log2 (vs);
2929 static enum mode_class classes[]
2930 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
2931 for (int i = 0; i < 4; i += 2)
2932 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
2933 mode != VOIDmode;
2934 mode = GET_MODE_WIDER_MODE (mode))
2936 vmode = targetm.vectorize.preferred_simd_mode (mode);
2937 if (GET_MODE_CLASS (vmode) != classes[i + 1])
2938 continue;
2939 while (vs
2940 && GET_MODE_SIZE (vmode) < vs
2941 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
2942 vmode = GET_MODE_2XWIDER_MODE (vmode);
2944 tree type = lang_hooks.types.type_for_mode (mode, 1);
2945 if (type == NULL_TREE || TYPE_MODE (type) != mode)
2946 continue;
2947 type = build_vector_type (type, GET_MODE_SIZE (vmode)
2948 / GET_MODE_SIZE (mode));
2949 if (TYPE_MODE (type) != vmode)
2950 continue;
2951 if (TYPE_ALIGN_UNIT (type) > al)
2952 al = TYPE_ALIGN_UNIT (type);
2954 return build_int_cst (integer_type_node, al);
2959 /* Return the maximum possible vectorization factor for the target. */
2959 static int
2960 omp_max_vf (void)
2962 if (!optimize
2963 || optimize_debug
2964 || !flag_tree_loop_optimize
2965 || (!flag_tree_loop_vectorize
2966 && (global_options_set.x_flag_tree_loop_vectorize
2967 || global_options_set.x_flag_tree_vectorize)))
2968 return 1;
2970 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2971 if (vs)
2973 vs = 1 << floor_log2 (vs);
2974 return vs;
2976 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2977 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2978 return GET_MODE_NUNITS (vqimode);
2979 return 1;
2982 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2983 privatization. */
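/* Roughly: NEW_VAR gets a backing array D.nnnn[max_vf] marked with the
   "omp simd array" attribute; IVAR is set to D.nnnn[idx], LVAR to
   D.nnnn[lane], and NEW_VAR's DECL_VALUE_EXPR is redirected to LVAR.
   (Sketch only; D.nnnn stands for the temporary created below.) */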
2985 static bool
2986 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2987 tree &idx, tree &lane, tree &ivar, tree &lvar)
2989 if (max_vf == 0)
2991 max_vf = omp_max_vf ();
2992 if (max_vf > 1)
2994 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2995 OMP_CLAUSE_SAFELEN);
2996 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
2997 max_vf = 1;
2998 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
2999 max_vf) == -1)
3000 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3002 if (max_vf > 1)
3004 idx = create_tmp_var (unsigned_type_node, NULL);
3005 lane = create_tmp_var (unsigned_type_node, NULL);
3008 if (max_vf == 1)
3009 return false;
3011 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3012 tree avar = create_tmp_var_raw (atype, NULL);
3013 if (TREE_ADDRESSABLE (new_var))
3014 TREE_ADDRESSABLE (avar) = 1;
3015 DECL_ATTRIBUTES (avar)
3016 = tree_cons (get_identifier ("omp simd array"), NULL,
3017 DECL_ATTRIBUTES (avar));
3018 gimple_add_tmp_var (avar);
3019 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3020 NULL_TREE, NULL_TREE);
3021 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3022 NULL_TREE, NULL_TREE);
3023 if (DECL_P (new_var))
3025 SET_DECL_VALUE_EXPR (new_var, lvar);
3026 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3028 return true;
3031 /* Helper function of lower_rec_input_clauses. For a reference used in
3032 a simd reduction, create the underlying variable it will reference. */
3034 static void
3035 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3037 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3038 if (TREE_CONSTANT (z))
3040 const char *name = NULL;
3041 if (DECL_NAME (new_vard))
3042 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3044 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3045 gimple_add_tmp_var (z);
3046 TREE_ADDRESSABLE (z) = 1;
3047 z = build_fold_addr_expr_loc (loc, z);
3048 gimplify_assign (new_vard, z, ilist);
3052 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3053 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3054 private variables. Initialization statements go in ILIST, while calls
3055 to destructors go in DLIST. */
3057 static void
3058 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3059 omp_context *ctx, struct omp_for_data *fd)
3061 tree c, dtor, copyin_seq, x, ptr;
3062 bool copyin_by_ref = false;
3063 bool lastprivate_firstprivate = false;
3064 bool reduction_omp_orig_ref = false;
3065 int pass;
3066 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3067 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3068 int max_vf = 0;
3069 tree lane = NULL_TREE, idx = NULL_TREE;
3070 tree ivar = NULL_TREE, lvar = NULL_TREE;
3071 gimple_seq llist[2] = { NULL, NULL };
3073 copyin_seq = NULL;
3075 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3076 with data sharing clauses referencing variable sized vars. That
3077 is unnecessarily hard to support and very unlikely to result in
3078 vectorized code anyway. */
3079 if (is_simd)
3080 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3081 switch (OMP_CLAUSE_CODE (c))
3083 case OMP_CLAUSE_LINEAR:
3084 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3085 max_vf = 1;
3086 /* FALLTHRU */
3087 case OMP_CLAUSE_REDUCTION:
3088 case OMP_CLAUSE_PRIVATE:
3089 case OMP_CLAUSE_FIRSTPRIVATE:
3090 case OMP_CLAUSE_LASTPRIVATE:
3091 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3092 max_vf = 1;
3093 break;
3094 default:
3095 continue;
3098 /* Do all the fixed sized types in the first pass, and the variable sized
3099 types in the second pass. This makes sure that the scalar arguments to
3100 the variable sized types are processed before we use them in the
3101 variable sized operations. */
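/* E.g. for a privatized VLA 'int a[n]', the privatized copy of N is set up
   in the first pass so that the alloca of A's storage in the second pass
   can use it. */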
3102 for (pass = 0; pass < 2; ++pass)
3104 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3106 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3107 tree var, new_var;
3108 bool by_ref;
3109 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3111 switch (c_kind)
3113 case OMP_CLAUSE_PRIVATE:
3114 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3115 continue;
3116 break;
3117 case OMP_CLAUSE_SHARED:
3118 /* Ignore shared directives in teams construct. */
3119 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3120 continue;
3121 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3123 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3124 continue;
3126 case OMP_CLAUSE_FIRSTPRIVATE:
3127 case OMP_CLAUSE_COPYIN:
3128 case OMP_CLAUSE_LINEAR:
3129 break;
3130 case OMP_CLAUSE_REDUCTION:
3131 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3132 reduction_omp_orig_ref = true;
3133 break;
3134 case OMP_CLAUSE__LOOPTEMP_:
3135 /* Handle _looptemp_ clauses only on parallel. */
3136 if (fd)
3137 continue;
3138 break;
3139 case OMP_CLAUSE_LASTPRIVATE:
3140 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3142 lastprivate_firstprivate = true;
3143 if (pass != 0)
3144 continue;
3146 /* Even without a corresponding firstprivate, if the
3147 decl is a Fortran allocatable, it needs an outer
3148 var reference. */
3149 else if (pass == 0
3150 && lang_hooks.decls.omp_private_outer_ref
3151 (OMP_CLAUSE_DECL (c)))
3152 lastprivate_firstprivate = true;
3153 break;
3154 case OMP_CLAUSE_ALIGNED:
3155 if (pass == 0)
3156 continue;
3157 var = OMP_CLAUSE_DECL (c);
3158 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3159 && !is_global_var (var))
3161 new_var = maybe_lookup_decl (var, ctx);
3162 if (new_var == NULL_TREE)
3163 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3164 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3165 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3166 omp_clause_aligned_alignment (c));
3167 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3168 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3169 gimplify_and_add (x, ilist);
3171 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3172 && is_global_var (var))
3174 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3175 new_var = lookup_decl (var, ctx);
3176 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3177 t = build_fold_addr_expr_loc (clause_loc, t);
3178 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3179 t = build_call_expr_loc (clause_loc, t2, 2, t,
3180 omp_clause_aligned_alignment (c));
3181 t = fold_convert_loc (clause_loc, ptype, t);
3182 x = create_tmp_var (ptype, NULL);
3183 t = build2 (MODIFY_EXPR, ptype, x, t);
3184 gimplify_and_add (t, ilist);
3185 t = build_simple_mem_ref_loc (clause_loc, x);
3186 SET_DECL_VALUE_EXPR (new_var, t);
3187 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3189 continue;
3190 default:
3191 continue;
3194 new_var = var = OMP_CLAUSE_DECL (c);
3195 if (c_kind != OMP_CLAUSE_COPYIN)
3196 new_var = lookup_decl (var, ctx);
3198 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3200 if (pass != 0)
3201 continue;
3203 else if (is_variable_sized (var))
3205 /* For variable sized types, we need to allocate the
3206 actual storage here. Call alloca and store the
3207 result in the pointer decl that we created elsewhere. */
3208 if (pass == 0)
3209 continue;
3211 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3213 gimple stmt;
3214 tree tmp, atmp;
3216 ptr = DECL_VALUE_EXPR (new_var);
3217 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3218 ptr = TREE_OPERAND (ptr, 0);
3219 gcc_assert (DECL_P (ptr));
3220 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3222 /* void *tmp = __builtin_alloca (x); */
3223 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3224 stmt = gimple_build_call (atmp, 1, x);
3225 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3226 gimple_add_tmp_var (tmp);
3227 gimple_call_set_lhs (stmt, tmp);
3229 gimple_seq_add_stmt (ilist, stmt);
3231 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3232 gimplify_assign (ptr, x, ilist);
3235 else if (is_reference (var))
3237 /* For references that are being privatized for Fortran,
3238 allocate new backing storage for the new pointer
3239 variable. This allows us to avoid changing all the
3240 code that expects a pointer into code that expects
3241 a direct variable. */
3242 if (pass == 0)
3243 continue;
3245 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3246 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3248 x = build_receiver_ref (var, false, ctx);
3249 x = build_fold_addr_expr_loc (clause_loc, x);
3251 else if (TREE_CONSTANT (x))
3253 /* For a reduction in a SIMD loop, defer adding the
3254 initialization of the reference, because if we decide
3255 to use a SIMD array for it, the initialization could
3256 cause an expansion ICE. */
3257 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3258 x = NULL_TREE;
3259 else
3261 const char *name = NULL;
3262 if (DECL_NAME (var))
3263 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3265 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3266 name);
3267 gimple_add_tmp_var (x);
3268 TREE_ADDRESSABLE (x) = 1;
3269 x = build_fold_addr_expr_loc (clause_loc, x);
3272 else
3274 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3275 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3278 if (x)
3280 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3281 gimplify_assign (new_var, x, ilist);
3284 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3286 else if (c_kind == OMP_CLAUSE_REDUCTION
3287 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3289 if (pass == 0)
3290 continue;
3292 else if (pass != 0)
3293 continue;
3295 switch (OMP_CLAUSE_CODE (c))
3297 case OMP_CLAUSE_SHARED:
3298 /* Ignore shared directives in teams construct. */
3299 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3300 continue;
3301 /* Shared global vars are just accessed directly. */
3302 if (is_global_var (new_var))
3303 break;
3304 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3305 needs to be delayed until after fixup_child_record_type so
3306 that we get the correct type during the dereference. */
3307 by_ref = use_pointer_for_field (var, ctx);
3308 x = build_receiver_ref (var, by_ref, ctx);
3309 SET_DECL_VALUE_EXPR (new_var, x);
3310 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3312 /* ??? If VAR is not passed by reference, and the variable
3313 hasn't been initialized yet, then we'll get a warning for
3314 the store into the omp_data_s structure. Ideally, we'd be
3315 able to notice this and not store anything at all, but
3316 we're generating code too early. Suppress the warning. */
3317 if (!by_ref)
3318 TREE_NO_WARNING (var) = 1;
3319 break;
3321 case OMP_CLAUSE_LASTPRIVATE:
3322 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3323 break;
3324 /* FALLTHRU */
3326 case OMP_CLAUSE_PRIVATE:
3327 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3328 x = build_outer_var_ref (var, ctx);
3329 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3331 if (is_task_ctx (ctx))
3332 x = build_receiver_ref (var, false, ctx);
3333 else
3334 x = build_outer_var_ref (var, ctx);
3336 else
3337 x = NULL;
3338 do_private:
3339 tree nx;
3340 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3341 if (is_simd)
3343 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3344 if ((TREE_ADDRESSABLE (new_var) || nx || y
3345 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3346 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3347 idx, lane, ivar, lvar))
3349 if (nx)
3350 x = lang_hooks.decls.omp_clause_default_ctor
3351 (c, unshare_expr (ivar), x);
3352 if (nx && x)
3353 gimplify_and_add (x, &llist[0]);
3354 if (y)
3356 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3357 if (y)
3359 gimple_seq tseq = NULL;
3361 dtor = y;
3362 gimplify_stmt (&dtor, &tseq);
3363 gimple_seq_add_seq (&llist[1], tseq);
3366 break;
3369 if (nx)
3370 gimplify_and_add (nx, ilist);
3371 /* FALLTHRU */
3373 do_dtor:
3374 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3375 if (x)
3377 gimple_seq tseq = NULL;
3379 dtor = x;
3380 gimplify_stmt (&dtor, &tseq);
3381 gimple_seq_add_seq (dlist, tseq);
3383 break;
3385 case OMP_CLAUSE_LINEAR:
3386 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3387 goto do_firstprivate;
3388 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3389 x = NULL;
3390 else
3391 x = build_outer_var_ref (var, ctx);
3392 goto do_private;
3394 case OMP_CLAUSE_FIRSTPRIVATE:
3395 if (is_task_ctx (ctx))
3397 if (is_reference (var) || is_variable_sized (var))
3398 goto do_dtor;
3399 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3400 ctx))
3401 || use_pointer_for_field (var, NULL))
3403 x = build_receiver_ref (var, false, ctx);
3404 SET_DECL_VALUE_EXPR (new_var, x);
3405 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3406 goto do_dtor;
3409 do_firstprivate:
3410 x = build_outer_var_ref (var, ctx);
3411 if (is_simd)
3413 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3414 && gimple_omp_for_combined_into_p (ctx->stmt))
3416 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3417 tree stept = TREE_TYPE (t);
3418 tree ct = find_omp_clause (clauses,
3419 OMP_CLAUSE__LOOPTEMP_);
3420 gcc_assert (ct);
3421 tree l = OMP_CLAUSE_DECL (ct);
3422 tree n1 = fd->loop.n1;
3423 tree step = fd->loop.step;
3424 tree itype = TREE_TYPE (l);
3425 if (POINTER_TYPE_P (itype))
3426 itype = signed_type_for (itype);
3427 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3428 if (TYPE_UNSIGNED (itype)
3429 && fd->loop.cond_code == GT_EXPR)
3430 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3431 fold_build1 (NEGATE_EXPR, itype, l),
3432 fold_build1 (NEGATE_EXPR,
3433 itype, step));
3434 else
3435 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3436 t = fold_build2 (MULT_EXPR, stept,
3437 fold_convert (stept, l), t);
3439 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3441 x = lang_hooks.decls.omp_clause_linear_ctor
3442 (c, new_var, x, t);
3443 gimplify_and_add (x, ilist);
3444 goto do_dtor;
3447 if (POINTER_TYPE_P (TREE_TYPE (x)))
3448 x = fold_build2 (POINTER_PLUS_EXPR,
3449 TREE_TYPE (x), x, t);
3450 else
3451 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3454 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3455 || TREE_ADDRESSABLE (new_var))
3456 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3457 idx, lane, ivar, lvar))
3459 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3461 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3462 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3463 gimplify_and_add (x, ilist);
3464 gimple_stmt_iterator gsi
3465 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3466 gimple g
3467 = gimple_build_assign (unshare_expr (lvar), iv);
3468 gsi_insert_before_without_update (&gsi, g,
3469 GSI_SAME_STMT);
3470 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3471 enum tree_code code = PLUS_EXPR;
3472 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3473 code = POINTER_PLUS_EXPR;
3474 g = gimple_build_assign_with_ops (code, iv, iv, t);
3475 gsi_insert_before_without_update (&gsi, g,
3476 GSI_SAME_STMT);
3477 break;
3479 x = lang_hooks.decls.omp_clause_copy_ctor
3480 (c, unshare_expr (ivar), x);
3481 gimplify_and_add (x, &llist[0]);
3482 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3483 if (x)
3485 gimple_seq tseq = NULL;
3487 dtor = x;
3488 gimplify_stmt (&dtor, &tseq);
3489 gimple_seq_add_seq (&llist[1], tseq);
3491 break;
3494 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3495 gimplify_and_add (x, ilist);
3496 goto do_dtor;
3498 case OMP_CLAUSE__LOOPTEMP_:
3499 gcc_assert (is_parallel_ctx (ctx));
3500 x = build_outer_var_ref (var, ctx);
3501 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3502 gimplify_and_add (x, ilist);
3503 break;
3505 case OMP_CLAUSE_COPYIN:
3506 by_ref = use_pointer_for_field (var, NULL);
3507 x = build_receiver_ref (var, by_ref, ctx);
3508 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3509 append_to_statement_list (x, &copyin_seq);
3510 copyin_by_ref |= by_ref;
3511 break;
3513 case OMP_CLAUSE_REDUCTION:
3514 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3516 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3517 gimple tseq;
3518 x = build_outer_var_ref (var, ctx);
3520 if (is_reference (var)
3521 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3522 TREE_TYPE (x)))
3523 x = build_fold_addr_expr_loc (clause_loc, x);
3524 SET_DECL_VALUE_EXPR (placeholder, x);
3525 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3526 tree new_vard = new_var;
3527 if (is_reference (var))
3529 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3530 new_vard = TREE_OPERAND (new_var, 0);
3531 gcc_assert (DECL_P (new_vard));
3533 if (is_simd
3534 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3535 idx, lane, ivar, lvar))
3537 if (new_vard == new_var)
3539 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3540 SET_DECL_VALUE_EXPR (new_var, ivar);
3542 else
3544 SET_DECL_VALUE_EXPR (new_vard,
3545 build_fold_addr_expr (ivar));
3546 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3548 x = lang_hooks.decls.omp_clause_default_ctor
3549 (c, unshare_expr (ivar),
3550 build_outer_var_ref (var, ctx));
3551 if (x)
3552 gimplify_and_add (x, &llist[0]);
3553 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3555 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3556 lower_omp (&tseq, ctx);
3557 gimple_seq_add_seq (&llist[0], tseq);
3559 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3560 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3561 lower_omp (&tseq, ctx);
3562 gimple_seq_add_seq (&llist[1], tseq);
3563 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3564 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3565 if (new_vard == new_var)
3566 SET_DECL_VALUE_EXPR (new_var, lvar);
3567 else
3568 SET_DECL_VALUE_EXPR (new_vard,
3569 build_fold_addr_expr (lvar));
3570 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3571 if (x)
3573 tseq = NULL;
3574 dtor = x;
3575 gimplify_stmt (&dtor, &tseq);
3576 gimple_seq_add_seq (&llist[1], tseq);
3578 break;
3580 /* If this is a reference to a constant-size reduction var
3581 with a placeholder, we haven't emitted the initializer
3582 for it because it is undesirable if SIMD arrays are used.
3583 But if they aren't used, we need to emit the deferred
3584 initialization now. */
3585 else if (is_reference (var) && is_simd)
3586 handle_simd_reference (clause_loc, new_vard, ilist);
3587 x = lang_hooks.decls.omp_clause_default_ctor
3588 (c, unshare_expr (new_var),
3589 build_outer_var_ref (var, ctx));
3590 if (x)
3591 gimplify_and_add (x, ilist);
3592 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3594 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3595 lower_omp (&tseq, ctx);
3596 gimple_seq_add_seq (ilist, tseq);
3598 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3599 if (is_simd)
3601 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3602 lower_omp (&tseq, ctx);
3603 gimple_seq_add_seq (dlist, tseq);
3604 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3606 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3607 goto do_dtor;
3609 else
3611 x = omp_reduction_init (c, TREE_TYPE (new_var));
3612 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3613 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3615 /* reduction(-:var) sums up the partial results, so it
3616 acts identically to reduction(+:var). */
3617 if (code == MINUS_EXPR)
3618 code = PLUS_EXPR;
3620 tree new_vard = new_var;
3621 if (is_simd && is_reference (var))
3623 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3624 new_vard = TREE_OPERAND (new_var, 0);
3625 gcc_assert (DECL_P (new_vard));
3627 if (is_simd
3628 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3629 idx, lane, ivar, lvar))
3631 tree ref = build_outer_var_ref (var, ctx);
3633 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3635 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3636 ref = build_outer_var_ref (var, ctx);
3637 gimplify_assign (ref, x, &llist[1]);
3639 if (new_vard != new_var)
3641 SET_DECL_VALUE_EXPR (new_vard,
3642 build_fold_addr_expr (lvar));
3643 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3646 else
3648 if (is_reference (var) && is_simd)
3649 handle_simd_reference (clause_loc, new_vard, ilist);
3650 gimplify_assign (new_var, x, ilist);
3651 if (is_simd)
3653 tree ref = build_outer_var_ref (var, ctx);
3655 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3656 ref = build_outer_var_ref (var, ctx);
3657 gimplify_assign (ref, x, dlist);
3661 break;
3663 default:
3664 gcc_unreachable ();
3669 if (lane)
3671 tree uid = create_tmp_var (ptr_type_node, "simduid");
3672 /* We don't want uninit warnings on simduid; it is always uninitialized,
3673 but we use it not for its value, only for its DECL_UID. */
3674 TREE_NO_WARNING (uid) = 1;
3675 gimple g
3676 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3677 gimple_call_set_lhs (g, lane);
3678 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3679 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3680 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3681 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3682 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3683 gimple_omp_for_set_clauses (ctx->stmt, c);
3684 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3685 build_int_cst (unsigned_type_node, 0),
3686 NULL_TREE);
3687 gimple_seq_add_stmt (ilist, g);
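/* Each non-empty llist[i] is wrapped in a loop over all lanes of the simd
   arrays; in effect:
     idx = 0; goto header;
     body: <llist[i] statements>; idx = idx + 1;
     header: if (idx < vf) goto body; else goto end;
     end:
   where vf is obtained from IFN_GOMP_SIMD_VF for this loop's simduid. */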
3688 for (int i = 0; i < 2; i++)
3689 if (llist[i])
3691 tree vf = create_tmp_var (unsigned_type_node, NULL);
3692 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3693 gimple_call_set_lhs (g, vf);
3694 gimple_seq *seq = i == 0 ? ilist : dlist;
3695 gimple_seq_add_stmt (seq, g);
3696 tree t = build_int_cst (unsigned_type_node, 0);
3697 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3698 gimple_seq_add_stmt (seq, g);
3699 tree body = create_artificial_label (UNKNOWN_LOCATION);
3700 tree header = create_artificial_label (UNKNOWN_LOCATION);
3701 tree end = create_artificial_label (UNKNOWN_LOCATION);
3702 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3703 gimple_seq_add_stmt (seq, gimple_build_label (body));
3704 gimple_seq_add_seq (seq, llist[i]);
3705 t = build_int_cst (unsigned_type_node, 1);
3706 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3707 gimple_seq_add_stmt (seq, g);
3708 gimple_seq_add_stmt (seq, gimple_build_label (header));
3709 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3710 gimple_seq_add_stmt (seq, g);
3711 gimple_seq_add_stmt (seq, gimple_build_label (end));
3715 /* The copyin sequence is not to be executed by the main thread, since
3716 that would result in self-copies. That may be harmless for scalars,
3717 but it certainly is not for C++ operator=. */
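/* I.e. emit, in effect:
     if (__builtin_omp_get_thread_num () != 0)
       <copyin_seq>;
   so that only the non-master threads perform the copies. */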
3718 if (copyin_seq)
3720 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3722 x = build2 (NE_EXPR, boolean_type_node, x,
3723 build_int_cst (TREE_TYPE (x), 0));
3724 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3725 gimplify_and_add (x, ilist);
3728 /* If any copyin variable is passed by reference, we must ensure the
3729 master thread doesn't modify it before it is copied over in all
3730 threads. Similarly for variables in both firstprivate and
3731 lastprivate clauses we need to ensure the lastprivate copying
3732 happens after firstprivate copying in all threads. And similarly
3733 for UDRs if initializer expression refers to omp_orig. */
3734 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3736 /* Don't add any barrier for #pragma omp simd or
3737 #pragma omp distribute. */
3738 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3739 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3740 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3743 /* If max_vf is non-zero, then we can use only a vectorization factor
3744 up to the max_vf we chose. So stick it into the safelen clause. */
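/* E.g. if the user wrote safelen(16) but we chose max_vf == 8, a
   safelen(8) clause is prepended so the vectorizer will not use a larger
   factor. */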
3745 if (max_vf)
3747 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3748 OMP_CLAUSE_SAFELEN);
3749 if (c == NULL_TREE
3750 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
3751 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3752 max_vf) == 1))
3754 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3755 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3756 max_vf);
3757 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3758 gimple_omp_for_set_clauses (ctx->stmt, c);
3764 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3765 both parallel and workshare constructs. PREDICATE may be NULL if it's
3766 always true. */
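/* When PREDICATE is non-NULL, the copy-out is guarded, in effect:
     if (<predicate>) goto label_true; else goto label;
     label_true: <lastprivate assignments>;
     label:
   (a sketch of the sequence built below). */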
3768 static void
3769 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3770 omp_context *ctx)
3772 tree x, c, label = NULL, orig_clauses = clauses;
3773 bool par_clauses = false;
3774 tree simduid = NULL, lastlane = NULL;
3776 /* Early exit if there are no lastprivate or linear clauses. */
3777 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3778 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3779 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3780 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3781 break;
3782 if (clauses == NULL)
3784 /* If this was a workshare clause, see if it had been combined
3785 with its parallel. In that case, look for the clauses on the
3786 parallel statement itself. */
3787 if (is_parallel_ctx (ctx))
3788 return;
3790 ctx = ctx->outer;
3791 if (ctx == NULL || !is_parallel_ctx (ctx))
3792 return;
3794 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3795 OMP_CLAUSE_LASTPRIVATE);
3796 if (clauses == NULL)
3797 return;
3798 par_clauses = true;
3801 if (predicate)
3803 gimple stmt;
3804 tree label_true, arm1, arm2;
3806 label = create_artificial_label (UNKNOWN_LOCATION);
3807 label_true = create_artificial_label (UNKNOWN_LOCATION);
3808 arm1 = TREE_OPERAND (predicate, 0);
3809 arm2 = TREE_OPERAND (predicate, 1);
3810 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3811 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3812 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3813 label_true, label);
3814 gimple_seq_add_stmt (stmt_list, stmt);
3815 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3818 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3819 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3821 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3822 if (simduid)
3823 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3826 for (c = clauses; c ;)
3828 tree var, new_var;
3829 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3831 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3832 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3833 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3835 var = OMP_CLAUSE_DECL (c);
3836 new_var = lookup_decl (var, ctx);
3838 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3840 tree val = DECL_VALUE_EXPR (new_var);
3841 if (TREE_CODE (val) == ARRAY_REF
3842 && VAR_P (TREE_OPERAND (val, 0))
3843 && lookup_attribute ("omp simd array",
3844 DECL_ATTRIBUTES (TREE_OPERAND (val,
3845 0))))
3847 if (lastlane == NULL)
3849 lastlane = create_tmp_var (unsigned_type_node, NULL);
3850 gimple g
3851 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3852 2, simduid,
3853 TREE_OPERAND (val, 1));
3854 gimple_call_set_lhs (g, lastlane);
3855 gimple_seq_add_stmt (stmt_list, g);
3857 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3858 TREE_OPERAND (val, 0), lastlane,
3859 NULL_TREE, NULL_TREE);
3863 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3864 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
3866 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
3867 gimple_seq_add_seq (stmt_list,
3868 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
3869 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
3871 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3872 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
3874 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
3875 gimple_seq_add_seq (stmt_list,
3876 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
3877 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
3880 x = build_outer_var_ref (var, ctx);
3881 if (is_reference (var))
3882 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3883 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
3884 gimplify_and_add (x, stmt_list);
3886 c = OMP_CLAUSE_CHAIN (c);
3887 if (c == NULL && !par_clauses)
3889 /* If this was a workshare clause, see if it had been combined
3890 with its parallel. In that case, continue looking for the
3891 clauses also on the parallel statement itself. */
3892 if (is_parallel_ctx (ctx))
3893 break;
3895 ctx = ctx->outer;
3896 if (ctx == NULL || !is_parallel_ctx (ctx))
3897 break;
3899 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3900 OMP_CLAUSE_LASTPRIVATE);
3901 par_clauses = true;
3905 if (label)
3906 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
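/* An illustrative sketch (all names here are made up): for

     #pragma omp parallel for lastprivate (x)
     for (i = 0; i < n; i++)
       x = f (i);

   the code built above conceptually amounts to

     if (i2 == e2) goto label_true; else goto label;
   label_true:
     x = x_priv;
   label:

   where the PREDICATE comparison tests whether this thread executed the
   sequentially last iteration, and x_priv is the thread's private copy
   being assigned back to the original list item.  */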
3910 /* Generate code to implement the REDUCTION clauses. */
3912 static void
3913 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
3915 gimple_seq sub_seq = NULL;
3916 gimple stmt;
3917 tree x, c;
3918 int count = 0;
3920 /* SIMD reductions are handled in lower_rec_input_clauses. */
3921 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3922 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3923 return;
3925 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3926 update in that case, otherwise use a lock. */
3927 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
3928 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
3930 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3932 /* Never use OMP_ATOMIC for array reductions or UDRs. */
3933 count = -1;
3934 break;
3936 count++;
3939 if (count == 0)
3940 return;
3942 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3944 tree var, ref, new_var;
3945 enum tree_code code;
3946 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3948 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
3949 continue;
3951 var = OMP_CLAUSE_DECL (c);
3952 new_var = lookup_decl (var, ctx);
3953 if (is_reference (var))
3954 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3955 ref = build_outer_var_ref (var, ctx);
3956 code = OMP_CLAUSE_REDUCTION_CODE (c);
3958 /* reduction(-:var) sums up the partial results, so it acts
3959 identically to reduction(+:var). */
3960 if (code == MINUS_EXPR)
3961 code = PLUS_EXPR;
3963 if (count == 1)
3965 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
3967 addr = save_expr (addr);
3968 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
3969 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
3970 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
3971 gimplify_and_add (x, stmt_seqp);
3972 return;
3975 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3977 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3979 if (is_reference (var)
3980 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3981 TREE_TYPE (ref)))
3982 ref = build_fold_addr_expr_loc (clause_loc, ref);
3983 SET_DECL_VALUE_EXPR (placeholder, ref);
3984 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3985 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
3986 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3987 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3988 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3990 else
3992 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3993 ref = build_outer_var_ref (var, ctx);
3994 gimplify_assign (ref, x, &sub_seq);
3998 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
3999 0);
4000 gimple_seq_add_stmt (stmt_seqp, stmt);
4002 gimple_seq_add_seq (stmt_seqp, sub_seq);
4004 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4005 0);
4006 gimple_seq_add_stmt (stmt_seqp, stmt);
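/* An illustrative sketch (names are made up): with a single
   "reduction (+:s)" clause the COUNT == 1 path above emits an atomic
   update,

     #pragma omp atomic
     *&s = *&s + s_priv;

   whereas two or more clauses, an array reduction or a user defined
   reduction fall back to the global lock:

     GOMP_atomic_start ();
     s = s + s_priv;
     t = t + t_priv;
     GOMP_atomic_end ();  */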
4010 /* Generate code to implement the COPYPRIVATE clauses. */
4012 static void
4013 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4014 omp_context *ctx)
4016 tree c;
4018 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4020 tree var, new_var, ref, x;
4021 bool by_ref;
4022 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4024 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4025 continue;
4027 var = OMP_CLAUSE_DECL (c);
4028 by_ref = use_pointer_for_field (var, NULL);
4030 ref = build_sender_ref (var, ctx);
4031 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4032 if (by_ref)
4034 x = build_fold_addr_expr_loc (clause_loc, new_var);
4035 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4037 gimplify_assign (ref, x, slist);
4039 ref = build_receiver_ref (var, false, ctx);
4040 if (by_ref)
4042 ref = fold_convert_loc (clause_loc,
4043 build_pointer_type (TREE_TYPE (new_var)),
4044 ref);
4045 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4047 if (is_reference (var))
4049 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4050 ref = build_simple_mem_ref_loc (clause_loc, ref);
4051 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4053 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4054 gimplify_and_add (x, rlist);
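/* An illustrative sketch (the field names are made up): for
   "#pragma omp single copyprivate (x)" the thread that ran the single
   region fills in the broadcast record on the send side (SLIST),

     .omp_copy_o.x = &x;

   using an address when USE_POINTER_FOR_FIELD says so, and every other
   thread copies the value out on the receive side (RLIST):

     x = *.omp_copy_i->x;  */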
4059 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4060 and REDUCTION clauses from the sender (aka parent) side. */
4062 static void
4063 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4064 omp_context *ctx)
4066 tree c;
4068 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4070 tree val, ref, x, var;
4071 bool by_ref, do_in = false, do_out = false;
4072 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4074 switch (OMP_CLAUSE_CODE (c))
4076 case OMP_CLAUSE_PRIVATE:
4077 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4078 break;
4079 continue;
4080 case OMP_CLAUSE_FIRSTPRIVATE:
4081 case OMP_CLAUSE_COPYIN:
4082 case OMP_CLAUSE_LASTPRIVATE:
4083 case OMP_CLAUSE_REDUCTION:
4084 case OMP_CLAUSE__LOOPTEMP_:
4085 break;
4086 default:
4087 continue;
4090 val = OMP_CLAUSE_DECL (c);
4091 var = lookup_decl_in_outer_ctx (val, ctx);
4093 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4094 && is_global_var (var))
4095 continue;
4096 if (is_variable_sized (val))
4097 continue;
4098 by_ref = use_pointer_for_field (val, NULL);
4100 switch (OMP_CLAUSE_CODE (c))
4102 case OMP_CLAUSE_PRIVATE:
4103 case OMP_CLAUSE_FIRSTPRIVATE:
4104 case OMP_CLAUSE_COPYIN:
4105 case OMP_CLAUSE__LOOPTEMP_:
4106 do_in = true;
4107 break;
4109 case OMP_CLAUSE_LASTPRIVATE:
4110 if (by_ref || is_reference (val))
4112 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4113 continue;
4114 do_in = true;
4116 else
4118 do_out = true;
4119 if (lang_hooks.decls.omp_private_outer_ref (val))
4120 do_in = true;
4122 break;
4124 case OMP_CLAUSE_REDUCTION:
4125 do_in = true;
4126 do_out = !(by_ref || is_reference (val));
4127 break;
4129 default:
4130 gcc_unreachable ();
4133 if (do_in)
4135 ref = build_sender_ref (val, ctx);
4136 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4137 gimplify_assign (ref, x, ilist);
4138 if (is_task_ctx (ctx))
4139 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4142 if (do_out)
4144 ref = build_sender_ref (val, ctx);
4145 gimplify_assign (var, ref, olist);
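/* An illustrative sketch (field names are made up): for
   "#pragma omp parallel firstprivate (a) lastprivate (b)" the parent
   stores the inbound value into the marshalling record before the fork
   (ILIST) and reads the outbound value back after the join (OLIST):

     .omp_data_o.a = a;
     GOMP_parallel (...);
     b = .omp_data_o.b;  */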
4150 /* Generate code to implement SHARED from the sender (aka parent)
4151 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4152 list things that got automatically shared. */
4154 static void
4155 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4157 tree var, ovar, nvar, f, x, record_type;
4159 if (ctx->record_type == NULL)
4160 return;
4162 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4163 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4165 ovar = DECL_ABSTRACT_ORIGIN (f);
4166 nvar = maybe_lookup_decl (ovar, ctx);
4167 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4168 continue;
4170 /* If CTX is a nested parallel directive, find the immediately
4171 enclosing parallel or workshare construct that contains a
4172 mapping for OVAR. */
4173 var = lookup_decl_in_outer_ctx (ovar, ctx);
4175 if (use_pointer_for_field (ovar, ctx))
4177 x = build_sender_ref (ovar, ctx);
4178 var = build_fold_addr_expr (var);
4179 gimplify_assign (x, var, ilist);
4181 else
4183 x = build_sender_ref (ovar, ctx);
4184 gimplify_assign (x, var, ilist);
4186 if (!TREE_READONLY (var)
4187 /* We don't need to receive a new reference to a result
4188 or parm decl. In fact we may not store to it as we will
4189 invalidate any pending RSO and generate wrong gimple
4190 during inlining. */
4191 && !((TREE_CODE (var) == RESULT_DECL
4192 || TREE_CODE (var) == PARM_DECL)
4193 && DECL_BY_REFERENCE (var)))
4195 x = build_sender_ref (ovar, ctx);
4196 gimplify_assign (var, x, olist);
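/* An illustrative sketch (field names are made up): a shared variable V
   that must be accessed through a pointer is sent by address,

     .omp_data_o.v = &v;

   while one sent by value is also copied back out after the region,

     .omp_data_o.v = v;  ...  v = .omp_data_o.v;

   unless it is read-only or a by-reference RESULT/PARM decl as noted
   above.  */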
4203 /* A convenience function to build an empty GIMPLE_COND with just the
4204 condition. */
4206 static gimple
4207 gimple_build_cond_empty (tree cond)
4209 enum tree_code pred_code;
4210 tree lhs, rhs;
4212 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4213 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4217 /* Build the function calls to GOMP_parallel_start etc. to actually
4218 generate the parallel operation. REGION is the parallel region
4219 being expanded. BB is the block where the code is to be inserted.
4220 WS_ARGS will be set if this is a call to a combined
4221 parallel+workshare construct; it contains the list of additional
4222 arguments needed by the workshare construct. */
4224 static void
4225 expand_parallel_call (struct omp_region *region, basic_block bb,
4226 gimple entry_stmt, vec<tree, va_gc> *ws_args)
4228 tree t, t1, t2, val, cond, c, clauses, flags;
4229 gimple_stmt_iterator gsi;
4230 gimple stmt;
4231 enum built_in_function start_ix;
4232 int start_ix2;
4233 location_t clause_loc;
4234 vec<tree, va_gc> *args;
4236 clauses = gimple_omp_parallel_clauses (entry_stmt);
4238 /* Determine what flavor of GOMP_parallel we will be
4239 emitting. */
4240 start_ix = BUILT_IN_GOMP_PARALLEL;
4241 if (is_combined_parallel (region))
4243 switch (region->inner->type)
4245 case GIMPLE_OMP_FOR:
4246 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4247 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4248 + (region->inner->sched_kind
4249 == OMP_CLAUSE_SCHEDULE_RUNTIME
4250 ? 3 : region->inner->sched_kind));
4251 start_ix = (enum built_in_function)start_ix2;
4252 break;
4253 case GIMPLE_OMP_SECTIONS:
4254 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4255 break;
4256 default:
4257 gcc_unreachable ();
4261 /* By default, the value of NUM_THREADS is zero (selected at run time)
4262 and there is no conditional. */
4263 cond = NULL_TREE;
4264 val = build_int_cst (unsigned_type_node, 0);
4265 flags = build_int_cst (unsigned_type_node, 0);
4267 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4268 if (c)
4269 cond = OMP_CLAUSE_IF_EXPR (c);
4271 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4272 if (c)
4274 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4275 clause_loc = OMP_CLAUSE_LOCATION (c);
4277 else
4278 clause_loc = gimple_location (entry_stmt);
4280 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4281 if (c)
4282 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4284 /* Ensure 'val' is of the correct type. */
4285 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4287 /* If we found the clause 'if (cond)', build either
4288 (cond != 0) or (cond ? val : 1u). */
4289 if (cond)
4291 cond = gimple_boolify (cond);
4293 if (integer_zerop (val))
4294 val = fold_build2_loc (clause_loc,
4295 EQ_EXPR, unsigned_type_node, cond,
4296 build_int_cst (TREE_TYPE (cond), 0));
4297 else
4299 basic_block cond_bb, then_bb, else_bb;
4300 edge e, e_then, e_else;
4301 tree tmp_then, tmp_else, tmp_join, tmp_var;
4303 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4304 if (gimple_in_ssa_p (cfun))
4306 tmp_then = make_ssa_name (tmp_var, NULL);
4307 tmp_else = make_ssa_name (tmp_var, NULL);
4308 tmp_join = make_ssa_name (tmp_var, NULL);
4310 else
4312 tmp_then = tmp_var;
4313 tmp_else = tmp_var;
4314 tmp_join = tmp_var;
4317 e = split_block (bb, NULL);
4318 cond_bb = e->src;
4319 bb = e->dest;
4320 remove_edge (e);
4322 then_bb = create_empty_bb (cond_bb);
4323 else_bb = create_empty_bb (then_bb);
4324 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4325 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4327 stmt = gimple_build_cond_empty (cond);
4328 gsi = gsi_start_bb (cond_bb);
4329 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4331 gsi = gsi_start_bb (then_bb);
4332 stmt = gimple_build_assign (tmp_then, val);
4333 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4335 gsi = gsi_start_bb (else_bb);
4336 stmt = gimple_build_assign
4337 (tmp_else, build_int_cst (unsigned_type_node, 1));
4338 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4340 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4341 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4342 add_bb_to_loop (then_bb, cond_bb->loop_father);
4343 add_bb_to_loop (else_bb, cond_bb->loop_father);
4344 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4345 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4347 if (gimple_in_ssa_p (cfun))
4349 gimple phi = create_phi_node (tmp_join, bb);
4350 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4351 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4354 val = tmp_join;
4357 gsi = gsi_start_bb (bb);
4358 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4359 false, GSI_CONTINUE_LINKING);
4362 gsi = gsi_last_bb (bb);
4363 t = gimple_omp_parallel_data_arg (entry_stmt);
4364 if (t == NULL)
4365 t1 = null_pointer_node;
4366 else
4367 t1 = build_fold_addr_expr (t);
4368 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4370 vec_alloc (args, 4 + vec_safe_length (ws_args));
4371 args->quick_push (t2);
4372 args->quick_push (t1);
4373 args->quick_push (val);
4374 if (ws_args)
4375 args->splice (*ws_args);
4376 args->quick_push (flags);
4378 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4379 builtin_decl_explicit (start_ix), args);
4381 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4382 false, GSI_CONTINUE_LINKING);
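/* An illustrative sketch (function and record names are made up): for

     #pragma omp parallel num_threads (4)

   the code above ends up emitting

     GOMP_parallel (main._omp_fn.0, &.omp_data_o, 4, 0);

   while a combined "#pragma omp parallel for schedule (static)" selects
   GOMP_parallel_loop_static instead and splices the workshare's bounds,
   step and chunk in as the WS_ARGS arguments.  */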
4386 /* Build the function call to GOMP_task to actually
4387 generate the task operation. BB is the block where the code is to be inserted. */
4389 static void
4390 expand_task_call (basic_block bb, gimple entry_stmt)
4392 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4393 gimple_stmt_iterator gsi;
4394 location_t loc = gimple_location (entry_stmt);
4396 clauses = gimple_omp_task_clauses (entry_stmt);
4398 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4399 if (c)
4400 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4401 else
4402 cond = boolean_true_node;
4404 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4405 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4406 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4407 flags = build_int_cst (unsigned_type_node,
4408 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4410 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4411 if (c)
4413 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4414 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4415 build_int_cst (unsigned_type_node, 2),
4416 build_int_cst (unsigned_type_node, 0));
4417 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4419 if (depend)
4420 depend = OMP_CLAUSE_DECL (depend);
4421 else
4422 depend = build_int_cst (ptr_type_node, 0);
4424 gsi = gsi_last_bb (bb);
4425 t = gimple_omp_task_data_arg (entry_stmt);
4426 if (t == NULL)
4427 t2 = null_pointer_node;
4428 else
4429 t2 = build_fold_addr_expr_loc (loc, t);
4430 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4431 t = gimple_omp_task_copy_fn (entry_stmt);
4432 if (t == NULL)
4433 t3 = null_pointer_node;
4434 else
4435 t3 = build_fold_addr_expr_loc (loc, t);
4437 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4438 8, t1, t2, t3,
4439 gimple_omp_task_arg_size (entry_stmt),
4440 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4441 depend);
4443 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4444 false, GSI_CONTINUE_LINKING);
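/* Editorial note on the FLAGS word built above (a hedged reading of the
   arithmetic, not a libgomp specification): bit 0 is set for "untied",
   bit 1 is added at run time when the "final" expression is true,
   bit 2 is set for "mergeable" and bit 3 when a "depend" clause is
   present.  For example,

     #pragma omp task untied mergeable

   is launched as GOMP_task (fn, data, cpyfn, size, align, cond, 5, 0)
   since 1 (untied) + 4 (mergeable) == 5.  */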
4448 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4449 catch handler and return it. This prevents programs from violating the
4450 structured block semantics with throws. */
4452 static gimple_seq
4453 maybe_catch_exception (gimple_seq body)
4455 gimple g;
4456 tree decl;
4458 if (!flag_exceptions)
4459 return body;
4461 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4462 decl = lang_hooks.eh_protect_cleanup_actions ();
4463 else
4464 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4466 g = gimple_build_eh_must_not_throw (decl);
4467 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4468 GIMPLE_TRY_CATCH);
4470 return gimple_seq_alloc_with_stmt (g);
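/* An illustrative sketch of the wrapper built above:

     try { BODY } catch { MUST_NOT_THROW: decl (); }

   where DECL is the language's EH cleanup action (for C++ typically a
   call to std::terminate) or __builtin_trap as the fallback, so an
   exception escaping the structured block terminates the program
   instead of unwinding past the OpenMP region.  */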
4473 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
4475 static tree
4476 vec2chain (vec<tree, va_gc> *v)
4478 tree chain = NULL_TREE, t;
4479 unsigned ix;
4481 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4483 DECL_CHAIN (t) = chain;
4484 chain = t;
4487 return chain;
4491 /* Remove barriers in REGION->EXIT's block. Note that this is only
4492 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4493 is an implicit barrier, the barrier left at the end of the
4494 GIMPLE_OMP_PARALLEL region by any workshare inside it can now be
4495 removed. */
4497 static void
4498 remove_exit_barrier (struct omp_region *region)
4500 gimple_stmt_iterator gsi;
4501 basic_block exit_bb;
4502 edge_iterator ei;
4503 edge e;
4504 gimple stmt;
4505 int any_addressable_vars = -1;
4507 exit_bb = region->exit;
4509 /* If the parallel region doesn't return, we don't have a REGION->EXIT
4510 block at all. */
4511 if (! exit_bb)
4512 return;
4514 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4515 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4516 statements that can appear in between are extremely limited -- no
4517 memory operations at all. Here, we allow nothing at all, so the
4518 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4519 gsi = gsi_last_bb (exit_bb);
4520 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4521 gsi_prev (&gsi);
4522 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4523 return;
4525 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4527 gsi = gsi_last_bb (e->src);
4528 if (gsi_end_p (gsi))
4529 continue;
4530 stmt = gsi_stmt (gsi);
4531 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4532 && !gimple_omp_return_nowait_p (stmt))
4534 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4535 in many cases. If there could be tasks queued, the barrier
4536 might be needed to let the tasks run before some local
4537 variable of the parallel that the task uses as shared
4538 runs out of scope. The task can be spawned either
4539 from within the current function (this would be easy to check)
4540 or from some function it calls and gets passed an address
4541 of such a variable. */
4542 if (any_addressable_vars < 0)
4544 gimple parallel_stmt = last_stmt (region->entry);
4545 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4546 tree local_decls, block, decl;
4547 unsigned ix;
4549 any_addressable_vars = 0;
4550 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4551 if (TREE_ADDRESSABLE (decl))
4553 any_addressable_vars = 1;
4554 break;
4556 for (block = gimple_block (stmt);
4557 !any_addressable_vars
4558 && block
4559 && TREE_CODE (block) == BLOCK;
4560 block = BLOCK_SUPERCONTEXT (block))
4562 for (local_decls = BLOCK_VARS (block);
4563 local_decls;
4564 local_decls = DECL_CHAIN (local_decls))
4565 if (TREE_ADDRESSABLE (local_decls))
4567 any_addressable_vars = 1;
4568 break;
4570 if (block == gimple_block (parallel_stmt))
4571 break;
4574 if (!any_addressable_vars)
4575 gimple_omp_return_set_nowait (stmt);
4580 static void
4581 remove_exit_barriers (struct omp_region *region)
4583 if (region->type == GIMPLE_OMP_PARALLEL)
4584 remove_exit_barrier (region);
4586 if (region->inner)
4588 region = region->inner;
4589 remove_exit_barriers (region);
4590 while (region->next)
4592 region = region->next;
4593 remove_exit_barriers (region);
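/* Editorial example: in

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         a[i] = i;
     }

   the workshare's implicit barrier immediately precedes the parallel
   region's own join barrier, so remove_exit_barrier can mark it nowait,
   unless queued tasks might still reference addressable locals as
   described above.  */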
4598 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4599 calls. These can't be declared as const functions, but
4600 within one parallel body they are constant, so they can be
4601 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4602 which are declared const. Similarly for a task body, except
4603 that in an untied task omp_get_thread_num () can change at any task
4604 scheduling point. */
4606 static void
4607 optimize_omp_library_calls (gimple entry_stmt)
4609 basic_block bb;
4610 gimple_stmt_iterator gsi;
4611 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4612 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4613 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4614 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4615 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4616 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4617 OMP_CLAUSE_UNTIED) != NULL);
4619 FOR_EACH_BB_FN (bb, cfun)
4620 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4622 gimple call = gsi_stmt (gsi);
4623 tree decl;
4625 if (is_gimple_call (call)
4626 && (decl = gimple_call_fndecl (call))
4627 && DECL_EXTERNAL (decl)
4628 && TREE_PUBLIC (decl)
4629 && DECL_INITIAL (decl) == NULL)
4631 tree built_in;
4633 if (DECL_NAME (decl) == thr_num_id)
4635 /* In #pragma omp task untied omp_get_thread_num () can change
4636 during the execution of the task region. */
4637 if (untied_task)
4638 continue;
4639 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4641 else if (DECL_NAME (decl) == num_thr_id)
4642 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4643 else
4644 continue;
4646 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4647 || gimple_call_num_args (call) != 0)
4648 continue;
4650 if (flag_exceptions && !TREE_NOTHROW (decl))
4651 continue;
4653 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4654 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4655 TREE_TYPE (TREE_TYPE (built_in))))
4656 continue;
4658 gimple_call_set_fndecl (call, built_in);
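/* Editorial example: within one parallel body

     a = omp_get_thread_num ();
     b = omp_get_thread_num ();

   both calls are redirected to the const __builtin_omp_get_thread_num,
   after which CSE can merge them into a single runtime query.  */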
4663 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4664 regimplified. */
4666 static tree
4667 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4669 tree t = *tp;
4671 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4672 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4673 return t;
4675 if (TREE_CODE (t) == ADDR_EXPR)
4676 recompute_tree_invariant_for_addr_expr (t);
4678 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4679 return NULL_TREE;
4682 /* Prepend a TO = FROM assignment before *GSI_P. */
4684 static void
4685 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4687 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4688 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4689 true, GSI_SAME_STMT);
4690 gimple stmt = gimple_build_assign (to, from);
4691 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4692 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4693 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4695 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4696 gimple_regimplify_operands (stmt, &gsi);
4700 /* Expand the OpenMP parallel or task directive starting at REGION. */
4702 static void
4703 expand_omp_taskreg (struct omp_region *region)
4705 basic_block entry_bb, exit_bb, new_bb;
4706 struct function *child_cfun;
4707 tree child_fn, block, t;
4708 gimple_stmt_iterator gsi;
4709 gimple entry_stmt, stmt;
4710 edge e;
4711 vec<tree, va_gc> *ws_args;
4713 entry_stmt = last_stmt (region->entry);
4714 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4715 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4717 entry_bb = region->entry;
4718 exit_bb = region->exit;
4720 if (is_combined_parallel (region))
4721 ws_args = region->ws_args;
4722 else
4723 ws_args = NULL;
4725 if (child_cfun->cfg)
4727 /* Due to inlining, it may happen that we have already outlined
4728 the region, in which case all we need to do is make the
4729 sub-graph unreachable and emit the parallel call. */
4730 edge entry_succ_e, exit_succ_e;
4732 entry_succ_e = single_succ_edge (entry_bb);
4734 gsi = gsi_last_bb (entry_bb);
4735 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4736 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4737 gsi_remove (&gsi, true);
4739 new_bb = entry_bb;
4740 if (exit_bb)
4742 exit_succ_e = single_succ_edge (exit_bb);
4743 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4745 remove_edge_and_dominated_blocks (entry_succ_e);
4747 else
4749 unsigned srcidx, dstidx, num;
4751 /* If the parallel region needs data sent from the parent
4752 function, then the very first statement (except possible
4753 tree profile counter updates) of the parallel body
4754 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4755 &.OMP_DATA_O is passed as an argument to the child function,
4756 we need to replace it with the argument as seen by the child
4757 function.
4759 In most cases, this will end up being the identity assignment
4760 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4761 a function call that has been inlined, the original PARM_DECL
4762 .OMP_DATA_I may have been converted into a different local
4763 variable, in which case we need to keep the assignment. */
4764 if (gimple_omp_taskreg_data_arg (entry_stmt))
4766 basic_block entry_succ_bb = single_succ (entry_bb);
4767 tree arg, narg;
4768 gimple parcopy_stmt = NULL;
4770 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4772 gimple stmt;
4774 gcc_assert (!gsi_end_p (gsi));
4775 stmt = gsi_stmt (gsi);
4776 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4777 continue;
4779 if (gimple_num_ops (stmt) == 2)
4781 tree arg = gimple_assign_rhs1 (stmt);
4783 /* We're ignoring the subcode because we're
4784 effectively doing a STRIP_NOPS. */
4786 if (TREE_CODE (arg) == ADDR_EXPR
4787 && TREE_OPERAND (arg, 0)
4788 == gimple_omp_taskreg_data_arg (entry_stmt))
4790 parcopy_stmt = stmt;
4791 break;
4796 gcc_assert (parcopy_stmt != NULL);
4797 arg = DECL_ARGUMENTS (child_fn);
4799 if (!gimple_in_ssa_p (cfun))
4801 if (gimple_assign_lhs (parcopy_stmt) == arg)
4802 gsi_remove (&gsi, true);
4803 else
4805 /* ?? Is setting the subcode really necessary ?? */
4806 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4807 gimple_assign_set_rhs1 (parcopy_stmt, arg);
4810 else
4812 /* If we are in ssa form, we must load the value from the default
4813 definition of the argument. That should not be defined now,
4814 since the argument is not used uninitialized. */
4815 gcc_assert (ssa_default_def (cfun, arg) == NULL);
4816 narg = make_ssa_name (arg, gimple_build_nop ());
4817 set_ssa_default_def (cfun, arg, narg);
4818 /* ?? Is setting the subcode really necessary ?? */
4819 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
4820 gimple_assign_set_rhs1 (parcopy_stmt, narg);
4821 update_stmt (parcopy_stmt);
4825 /* Declare local variables needed in CHILD_CFUN. */
4826 block = DECL_INITIAL (child_fn);
4827 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
4828 /* The gimplifier could record temporaries in the parallel/task block
4829 rather than in the containing function's local_decls chain,
4830 which would mean cgraph missed finalizing them. Do it now. */
4831 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
4832 if (TREE_CODE (t) == VAR_DECL
4833 && TREE_STATIC (t)
4834 && !DECL_EXTERNAL (t))
4835 varpool_node::finalize_decl (t);
4836 DECL_SAVED_TREE (child_fn) = NULL;
4837 /* We'll create a CFG for child_fn, so no gimple body is needed. */
4838 gimple_set_body (child_fn, NULL);
4839 TREE_USED (block) = 1;
4841 /* Reset DECL_CONTEXT on function arguments. */
4842 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
4843 DECL_CONTEXT (t) = child_fn;
4845 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
4846 so that it can be moved to the child function. */
4847 gsi = gsi_last_bb (entry_bb);
4848 stmt = gsi_stmt (gsi);
4849 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
4850 || gimple_code (stmt) == GIMPLE_OMP_TASK));
4851 gsi_remove (&gsi, true);
4852 e = split_block (entry_bb, stmt);
4853 entry_bb = e->dest;
4854 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4856 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
4857 if (exit_bb)
4859 gsi = gsi_last_bb (exit_bb);
4860 gcc_assert (!gsi_end_p (gsi)
4861 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4862 stmt = gimple_build_return (NULL);
4863 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4864 gsi_remove (&gsi, true);
4867 /* Move the parallel region into CHILD_CFUN. */
4869 if (gimple_in_ssa_p (cfun))
4871 init_tree_ssa (child_cfun);
4872 init_ssa_operands (child_cfun);
4873 child_cfun->gimple_df->in_ssa_p = true;
4874 block = NULL_TREE;
4876 else
4877 block = gimple_block (entry_stmt);
4879 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
4880 if (exit_bb)
4881 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
4882 /* When the OMP expansion process cannot guarantee an up-to-date
4883 loop tree, arrange for the child function to fix up loops. */
4884 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
4885 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
4887 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
4888 num = vec_safe_length (child_cfun->local_decls);
4889 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
4891 t = (*child_cfun->local_decls)[srcidx];
4892 if (DECL_CONTEXT (t) == cfun->decl)
4893 continue;
4894 if (srcidx != dstidx)
4895 (*child_cfun->local_decls)[dstidx] = t;
4896 dstidx++;
4898 if (dstidx != num)
4899 vec_safe_truncate (child_cfun->local_decls, dstidx);
4901 /* Inform the callgraph about the new function. */
4902 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
4903 cgraph_node::add_new_function (child_fn, true);
4905 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4906 fixed in a following pass. */
4907 push_cfun (child_cfun);
4908 if (optimize)
4909 optimize_omp_library_calls (entry_stmt);
4910 cgraph_edge::rebuild_edges ();
4912 /* Some EH regions might become dead; see PR34608. If
4913 pass_cleanup_cfg isn't the first pass to happen with the
4914 new child, these dead EH edges might cause problems.
4915 Clean them up now. */
4916 if (flag_exceptions)
4918 basic_block bb;
4919 bool changed = false;
4921 FOR_EACH_BB_FN (bb, cfun)
4922 changed |= gimple_purge_dead_eh_edges (bb);
4923 if (changed)
4924 cleanup_tree_cfg ();
4926 if (gimple_in_ssa_p (cfun))
4927 update_ssa (TODO_update_ssa);
4928 pop_cfun ();
4931 /* Emit a library call to launch the children threads. */
4932 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
4933 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4934 else
4935 expand_task_call (new_bb, entry_stmt);
4936 if (gimple_in_ssa_p (cfun))
4937 update_ssa (TODO_update_ssa_only_virtuals);
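/* An illustrative sketch (names are made up) of the net effect: the body
   of "#pragma omp parallel" has been outlined into

     static void main._omp_fn.0 (struct .omp_data_s * .omp_data_i)

   the single-entry single-exit region was moved into that child
   function, and the original call site is left with the marshalling
   stores plus the GOMP_parallel launch emitted by expand_parallel_call
   above.  */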
4941 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4942 of the combined collapse > 1 loop constructs, generate code like:
4943 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4944 if (cond3 is <)
4945 adj = STEP3 - 1;
4946 else
4947 adj = STEP3 + 1;
4948 count3 = (adj + N32 - N31) / STEP3;
4949 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4950 if (cond2 is <)
4951 adj = STEP2 - 1;
4952 else
4953 adj = STEP2 + 1;
4954 count2 = (adj + N22 - N21) / STEP2;
4955 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4956 if (cond1 is <)
4957 adj = STEP1 - 1;
4958 else
4959 adj = STEP1 + 1;
4960 count1 = (adj + N12 - N11) / STEP1;
4961 count = count1 * count2 * count3;
4962 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4963 count = 0;
4964 and set ZERO_ITER_BB to that bb. If this isn't the outermost
4965 of the combined loop constructs, just initialize COUNTS array
4966 from the _looptemp_ clauses. */
4968 /* NOTE: It *could* be better to moosh all of the BBs together,
4969 creating one larger BB with all the computation and the unexpected
4970 jump at the end. I.e.
4972 bool zero3, zero2, zero1, zero;
4974 zero3 = N32 c3 N31;
4975 count3 = (N32 - N31) /[cl] STEP3;
4976 zero2 = N22 c2 N21;
4977 count2 = (N22 - N21) /[cl] STEP2;
4978 zero1 = N12 c1 N11;
4979 count1 = (N12 - N11) /[cl] STEP1;
4980 zero = zero3 || zero2 || zero1;
4981 count = count1 * count2 * count3;
4982 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4984 After all, we expect zero to be false, and thus we expect to have to
4985 evaluate all of the comparison expressions, so short-circuiting
4986 oughtn't be a win. Since the condition isn't protecting a
4987 denominator, we're not concerned about divide-by-zero, so we can
4988 fully evaluate count even if a numerator turned out to be wrong.
4990 It seems like putting this all together would create much better
4991 scheduling opportunities, and less pressure on the chip's branch
4992 predictor. */
4994 static void
4995 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4996 basic_block &entry_bb, tree *counts,
4997 basic_block &zero_iter_bb, int &first_zero_iter,
4998 basic_block &l2_dom_bb)
5000 tree t, type = TREE_TYPE (fd->loop.v);
5001 gimple stmt;
5002 edge e, ne;
5003 int i;
5005 /* Collapsed loops need work for expansion into SSA form. */
5006 gcc_assert (!gimple_in_ssa_p (cfun));
5008 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5009 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5011 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5012 isn't supposed to be handled, as the inner loop doesn't
5013 use it. */
5014 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5015 OMP_CLAUSE__LOOPTEMP_);
5016 gcc_assert (innerc);
5017 for (i = 0; i < fd->collapse; i++)
5019 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5020 OMP_CLAUSE__LOOPTEMP_);
5021 gcc_assert (innerc);
5022 if (i)
5023 counts[i] = OMP_CLAUSE_DECL (innerc);
5024 else
5025 counts[0] = NULL_TREE;
5027 return;
5030 for (i = 0; i < fd->collapse; i++)
5032 tree itype = TREE_TYPE (fd->loops[i].v);
5034 if (SSA_VAR_P (fd->loop.n2)
5035 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5036 fold_convert (itype, fd->loops[i].n1),
5037 fold_convert (itype, fd->loops[i].n2)))
5038 == NULL_TREE || !integer_onep (t)))
5040 tree n1, n2;
5041 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5042 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5043 true, GSI_SAME_STMT);
5044 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5045 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5046 true, GSI_SAME_STMT);
5047 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5048 NULL_TREE, NULL_TREE);
5049 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5050 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5051 expand_omp_regimplify_p, NULL, NULL)
5052 || walk_tree (gimple_cond_rhs_ptr (stmt),
5053 expand_omp_regimplify_p, NULL, NULL))
5055 *gsi = gsi_for_stmt (stmt);
5056 gimple_regimplify_operands (stmt, gsi);
5058 e = split_block (entry_bb, stmt);
5059 if (zero_iter_bb == NULL)
5061 first_zero_iter = i;
5062 zero_iter_bb = create_empty_bb (entry_bb);
5063 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5064 *gsi = gsi_after_labels (zero_iter_bb);
5065 stmt = gimple_build_assign (fd->loop.n2,
5066 build_zero_cst (type));
5067 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5068 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5069 entry_bb);
5071 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5072 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5073 e->flags = EDGE_TRUE_VALUE;
5074 e->probability = REG_BR_PROB_BASE - ne->probability;
5075 if (l2_dom_bb == NULL)
5076 l2_dom_bb = entry_bb;
5077 entry_bb = e->dest;
5078 *gsi = gsi_last_bb (entry_bb);
5081 if (POINTER_TYPE_P (itype))
5082 itype = signed_type_for (itype);
5083 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5084 ? -1 : 1));
5085 t = fold_build2 (PLUS_EXPR, itype,
5086 fold_convert (itype, fd->loops[i].step), t);
5087 t = fold_build2 (PLUS_EXPR, itype, t,
5088 fold_convert (itype, fd->loops[i].n2));
5089 t = fold_build2 (MINUS_EXPR, itype, t,
5090 fold_convert (itype, fd->loops[i].n1));
5091 /* ?? We could probably use CEIL_DIV_EXPR instead of
5092 TRUNC_DIV_EXPR and adjust by hand, unless we can't
5093 generate the same code in the end because generically we
5094 don't know that the values involved must be negative for
5095 GT?? */
5096 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5097 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5098 fold_build1 (NEGATE_EXPR, itype, t),
5099 fold_build1 (NEGATE_EXPR, itype,
5100 fold_convert (itype,
5101 fd->loops[i].step)));
5102 else
5103 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5104 fold_convert (itype, fd->loops[i].step));
5105 t = fold_convert (type, t);
5106 if (TREE_CODE (t) == INTEGER_CST)
5107 counts[i] = t;
5108 else
5110 counts[i] = create_tmp_reg (type, ".count");
5111 expand_omp_build_assign (gsi, counts[i], t);
5113 if (SSA_VAR_P (fd->loop.n2))
5115 if (i == 0)
5116 t = counts[0];
5117 else
5118 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5119 expand_omp_build_assign (gsi, fd->loop.n2, t);
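/* Editorial worked example of the count computation above: for
   "for (i = 0; i < 10; i += 3)" we have cond_code == LT_EXPR, so
   adj = STEP - 1 = 2 and count = (2 + 10 - 0) / 3 = 4, matching the
   iterations i = 0, 3, 6, 9.  The total count for the collapsed nest is
   then the product of the per-level counts, accumulated into
   fd->loop.n2.  */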
5125 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5126 T = V;
5127 V3 = N31 + (T % count3) * STEP3;
5128 T = T / count3;
5129 V2 = N21 + (T % count2) * STEP2;
5130 T = T / count2;
5131 V1 = N11 + T * STEP1;
5132 if this loop doesn't have an inner loop construct combined with it.
5133 If it does have an inner loop construct combined with it and the
5134 iteration count isn't known constant, store values from counts array
5135 into its _looptemp_ temporaries instead. */
5137 static void
5138 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5139 tree *counts, gimple inner_stmt, tree startvar)
5141 int i;
5142 if (gimple_omp_for_combined_p (fd->for_stmt))
5144 /* If fd->loop.n2 is constant, then no propagation of the counts
5145 is needed, they are constant. */
5146 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5147 return;
5149 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5150 ? gimple_omp_parallel_clauses (inner_stmt)
5151 : gimple_omp_for_clauses (inner_stmt);
5152 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5153 isn't supposed to be handled, as the inner loop doesn't
5154 use it. */
5155 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5156 gcc_assert (innerc);
5157 for (i = 0; i < fd->collapse; i++)
5159 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5160 OMP_CLAUSE__LOOPTEMP_);
5161 gcc_assert (innerc);
5162 if (i)
5164 tree tem = OMP_CLAUSE_DECL (innerc);
5165 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5166 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5167 false, GSI_CONTINUE_LINKING);
5168 gimple stmt = gimple_build_assign (tem, t);
5169 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5172 return;
5175 tree type = TREE_TYPE (fd->loop.v);
5176 tree tem = create_tmp_reg (type, ".tem");
5177 gimple stmt = gimple_build_assign (tem, startvar);
5178 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5180 for (i = fd->collapse - 1; i >= 0; i--)
5182 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5183 itype = vtype;
5184 if (POINTER_TYPE_P (vtype))
5185 itype = signed_type_for (vtype);
5186 if (i != 0)
5187 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5188 else
5189 t = tem;
5190 t = fold_convert (itype, t);
5191 t = fold_build2 (MULT_EXPR, itype, t,
5192 fold_convert (itype, fd->loops[i].step));
5193 if (POINTER_TYPE_P (vtype))
5194 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5195 else
5196 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5197 t = force_gimple_operand_gsi (gsi, t,
5198 DECL_P (fd->loops[i].v)
5199 && TREE_ADDRESSABLE (fd->loops[i].v),
5200 NULL_TREE, false,
5201 GSI_CONTINUE_LINKING);
5202 stmt = gimple_build_assign (fd->loops[i].v, t);
5203 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5204 if (i != 0)
5206 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5207 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5208 false, GSI_CONTINUE_LINKING);
5209 stmt = gimple_build_assign (tem, t);
5210 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
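/* Editorial worked example of the decomposition above: with
   collapse (2), count2 == 5 and logical iteration T == 13, the code
   computes V2 = N21 + (13 % 5) * STEP2 = N21 + 3 * STEP2, then
   T = 13 / 5 = 2, so V1 = N11 + 2 * STEP1; i.e. logical iteration 13 is
   outer index 2, inner index 3.  */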
5216 /* Helper function for expand_omp_for_*. Generate code like:
5217 L10:
5218 V3 += STEP3;
5219 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5220 L11:
5221 V3 = N31;
5222 V2 += STEP2;
5223 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5224 L12:
5225 V2 = N21;
5226 V1 += STEP1;
5227 goto BODY_BB; */
5229 static basic_block
5230 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5231 basic_block body_bb)
5233 basic_block last_bb, bb, collapse_bb = NULL;
5234 int i;
5235 gimple_stmt_iterator gsi;
5236 edge e;
5237 tree t;
5238 gimple stmt;
5240 last_bb = cont_bb;
5241 for (i = fd->collapse - 1; i >= 0; i--)
5243 tree vtype = TREE_TYPE (fd->loops[i].v);
5245 bb = create_empty_bb (last_bb);
5246 add_bb_to_loop (bb, last_bb->loop_father);
5247 gsi = gsi_start_bb (bb);
5249 if (i < fd->collapse - 1)
5251 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5252 e->probability = REG_BR_PROB_BASE / 8;
5254 t = fd->loops[i + 1].n1;
5255 t = force_gimple_operand_gsi (&gsi, t,
5256 DECL_P (fd->loops[i + 1].v)
5257 && TREE_ADDRESSABLE (fd->loops[i
5258 + 1].v),
5259 NULL_TREE, false,
5260 GSI_CONTINUE_LINKING);
5261 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5262 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5264 else
5265 collapse_bb = bb;
5267 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5269 if (POINTER_TYPE_P (vtype))
5270 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5271 else
5272 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5273 t = force_gimple_operand_gsi (&gsi, t,
5274 DECL_P (fd->loops[i].v)
5275 && TREE_ADDRESSABLE (fd->loops[i].v),
5276 NULL_TREE, false, GSI_CONTINUE_LINKING);
5277 stmt = gimple_build_assign (fd->loops[i].v, t);
5278 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5280 if (i > 0)
5282 t = fd->loops[i].n2;
5283 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5284 false, GSI_CONTINUE_LINKING);
5285 tree v = fd->loops[i].v;
5286 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5287 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5288 false, GSI_CONTINUE_LINKING);
5289 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5290 stmt = gimple_build_cond_empty (t);
5291 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5292 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5293 e->probability = REG_BR_PROB_BASE * 7 / 8;
5295 else
5296 make_edge (bb, body_bb, EDGE_FALLTHRU);
5297 last_bb = bb;
5300 return collapse_bb;
5304 /* A subroutine of expand_omp_for. Generate code for a parallel
5305 loop with any schedule. Given parameters:
5307 for (V = N1; V cond N2; V += STEP) BODY;
5309 where COND is "<" or ">", we generate pseudocode
5311 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5312 if (more) goto L0; else goto L3;
5313 L0:
5314 V = istart0;
5315 iend = iend0;
5316 L1:
5317 BODY;
5318 V += STEP;
5319 if (V cond iend) goto L1; else goto L2;
5320 L2:
5321 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5322 L3:
5324 If this is a combined omp parallel loop, instead of the call to
5325 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5326 If this is a gimple_omp_for_combined_p loop, then instead of assigning
5327 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5328 inner GIMPLE_OMP_FOR and V += STEP; and
5329 if (V cond iend) goto L1; else goto L2; are removed.
5331 For collapsed loops, given parameters:
5332 collapse(3)
5333 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5334 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5335 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5336 BODY;
5338 we generate pseudocode
5340 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5341 if (cond3 is <)
5342 adj = STEP3 - 1;
5343 else
5344 adj = STEP3 + 1;
5345 count3 = (adj + N32 - N31) / STEP3;
5346 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5347 if (cond2 is <)
5348 adj = STEP2 - 1;
5349 else
5350 adj = STEP2 + 1;
5351 count2 = (adj + N22 - N21) / STEP2;
5352 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5353 if (cond1 is <)
5354 adj = STEP1 - 1;
5355 else
5356 adj = STEP1 + 1;
5357 count1 = (adj + N12 - N11) / STEP1;
5358 count = count1 * count2 * count3;
5359 goto Z1;
5360 Z0:
5361 count = 0;
5362 Z1:
5363 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5364 if (more) goto L0; else goto L3;
5365 L0:
5366 V = istart0;
5367 T = V;
5368 V3 = N31 + (T % count3) * STEP3;
5369 T = T / count3;
5370 V2 = N21 + (T % count2) * STEP2;
5371 T = T / count2;
5372 V1 = N11 + T * STEP1;
5373 iend = iend0;
5374 L1:
5375 BODY;
5376 V += 1;
5377 if (V < iend) goto L10; else goto L2;
5378 L10:
5379 V3 += STEP3;
5380 if (V3 cond3 N32) goto L1; else goto L11;
5381 L11:
5382 V3 = N31;
5383 V2 += STEP2;
5384 if (V2 cond2 N22) goto L1; else goto L12;
5385 L12:
5386 V2 = N21;
5387 V1 += STEP1;
5388 goto L1;
5389 L2:
5390 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5391 L3:
5393 */
5395 static void
5396 expand_omp_for_generic (struct omp_region *region,
5397 struct omp_for_data *fd,
5398 enum built_in_function start_fn,
5399 enum built_in_function next_fn,
5400 gimple inner_stmt)
5402 tree type, istart0, iend0, iend;
5403 tree t, vmain, vback, bias = NULL_TREE;
5404 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5405 basic_block l2_bb = NULL, l3_bb = NULL;
5406 gimple_stmt_iterator gsi;
5407 gimple stmt;
5408 bool in_combined_parallel = is_combined_parallel (region);
5409 bool broken_loop = region->cont == NULL;
5410 edge e, ne;
5411 tree *counts = NULL;
5412 int i;
5414 gcc_assert (!broken_loop || !in_combined_parallel);
5415 gcc_assert (fd->iter_type == long_integer_type_node
5416 || !in_combined_parallel);
5418 type = TREE_TYPE (fd->loop.v);
5419 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5420 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5421 TREE_ADDRESSABLE (istart0) = 1;
5422 TREE_ADDRESSABLE (iend0) = 1;
5424 /* See if we need to bias by LLONG_MIN. */
5425 if (fd->iter_type == long_long_unsigned_type_node
5426 && TREE_CODE (type) == INTEGER_TYPE
5427 && !TYPE_UNSIGNED (type))
5429 tree n1, n2;
5431 if (fd->loop.cond_code == LT_EXPR)
5433 n1 = fd->loop.n1;
5434 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5436 else
5438 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5439 n2 = fd->loop.n1;
5441 if (TREE_CODE (n1) != INTEGER_CST
5442 || TREE_CODE (n2) != INTEGER_CST
5443 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5444 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5447 entry_bb = region->entry;
5448 cont_bb = region->cont;
5449 collapse_bb = NULL;
5450 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5451 gcc_assert (broken_loop
5452 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5453 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5454 l1_bb = single_succ (l0_bb);
5455 if (!broken_loop)
5457 l2_bb = create_empty_bb (cont_bb);
5458 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5459 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5461 else
5462 l2_bb = NULL;
5463 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5464 exit_bb = region->exit;
5466 gsi = gsi_last_bb (entry_bb);
5468 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5469 if (fd->collapse > 1)
5471 int first_zero_iter = -1;
5472 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5474 counts = XALLOCAVEC (tree, fd->collapse);
5475 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5476 zero_iter_bb, first_zero_iter,
5477 l2_dom_bb);
5479 if (zero_iter_bb)
5481 /* Some counts[i] vars might be uninitialized if
5482 some loop has zero iterations. But the body shouldn't
5483 be executed in that case, so just avoid uninit warnings. */
5484 for (i = first_zero_iter; i < fd->collapse; i++)
5485 if (SSA_VAR_P (counts[i]))
5486 TREE_NO_WARNING (counts[i]) = 1;
5487 gsi_prev (&gsi);
5488 e = split_block (entry_bb, gsi_stmt (gsi));
5489 entry_bb = e->dest;
5490 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5491 gsi = gsi_last_bb (entry_bb);
5492 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5493 get_immediate_dominator (CDI_DOMINATORS,
5494 zero_iter_bb));
5497 if (in_combined_parallel)
5499 /* In a combined parallel loop, emit a call to
5500 GOMP_loop_foo_next. */
5501 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5502 build_fold_addr_expr (istart0),
5503 build_fold_addr_expr (iend0));
5505 else
5507 tree t0, t1, t2, t3, t4;
5508 /* If this is not a combined parallel loop, emit a call to
5509 GOMP_loop_foo_start in ENTRY_BB. */
5510 t4 = build_fold_addr_expr (iend0);
5511 t3 = build_fold_addr_expr (istart0);
5512 t2 = fold_convert (fd->iter_type, fd->loop.step);
5513 t1 = fd->loop.n2;
5514 t0 = fd->loop.n1;
5515 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5517 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5518 OMP_CLAUSE__LOOPTEMP_);
5519 gcc_assert (innerc);
5520 t0 = OMP_CLAUSE_DECL (innerc);
5521 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5522 OMP_CLAUSE__LOOPTEMP_);
5523 gcc_assert (innerc);
5524 t1 = OMP_CLAUSE_DECL (innerc);
5526 if (POINTER_TYPE_P (TREE_TYPE (t0))
5527 && TYPE_PRECISION (TREE_TYPE (t0))
5528 != TYPE_PRECISION (fd->iter_type))
5530 /* Avoid casting pointers to an integer of a different size. */
5531 tree itype = signed_type_for (type);
5532 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5533 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5535 else
5537 t1 = fold_convert (fd->iter_type, t1);
5538 t0 = fold_convert (fd->iter_type, t0);
5540 if (bias)
5542 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5543 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5545 if (fd->iter_type == long_integer_type_node)
5547 if (fd->chunk_size)
5549 t = fold_convert (fd->iter_type, fd->chunk_size);
5550 t = build_call_expr (builtin_decl_explicit (start_fn),
5551 6, t0, t1, t2, t, t3, t4);
5553 else
5554 t = build_call_expr (builtin_decl_explicit (start_fn),
5555 5, t0, t1, t2, t3, t4);
5557 else
5559 tree t5;
5560 tree c_bool_type;
5561 tree bfn_decl;
5563 /* The GOMP_loop_ull_*start functions have an additional boolean
5564 argument, true for < loops and false for > loops.
5565 In Fortran, the C bool type can be different from
5566 boolean_type_node. */
5567 bfn_decl = builtin_decl_explicit (start_fn);
5568 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5569 t5 = build_int_cst (c_bool_type,
5570 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5571 if (fd->chunk_size)
5573 tree bfn_decl = builtin_decl_explicit (start_fn);
5574 t = fold_convert (fd->iter_type, fd->chunk_size);
5575 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5577 else
5578 t = build_call_expr (builtin_decl_explicit (start_fn),
5579 6, t5, t0, t1, t2, t3, t4);
5582 if (TREE_TYPE (t) != boolean_type_node)
5583 t = fold_build2 (NE_EXPR, boolean_type_node,
5584 t, build_int_cst (TREE_TYPE (t), 0));
5585 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5586 true, GSI_SAME_STMT);
5587 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5589 /* Remove the GIMPLE_OMP_FOR statement. */
5590 gsi_remove (&gsi, true);
5592 /* Iteration setup for sequential loop goes in L0_BB. */
5593 tree startvar = fd->loop.v;
5594 tree endvar = NULL_TREE;
5596 if (gimple_omp_for_combined_p (fd->for_stmt))
5598 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5599 && gimple_omp_for_kind (inner_stmt)
5600 == GF_OMP_FOR_KIND_SIMD);
5601 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5602 OMP_CLAUSE__LOOPTEMP_);
5603 gcc_assert (innerc);
5604 startvar = OMP_CLAUSE_DECL (innerc);
5605 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5606 OMP_CLAUSE__LOOPTEMP_);
5607 gcc_assert (innerc);
5608 endvar = OMP_CLAUSE_DECL (innerc);
5611 gsi = gsi_start_bb (l0_bb);
5612 t = istart0;
5613 if (bias)
5614 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5615 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5616 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5617 t = fold_convert (TREE_TYPE (startvar), t);
5618 t = force_gimple_operand_gsi (&gsi, t,
5619 DECL_P (startvar)
5620 && TREE_ADDRESSABLE (startvar),
5621 NULL_TREE, false, GSI_CONTINUE_LINKING);
5622 stmt = gimple_build_assign (startvar, t);
5623 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5625 t = iend0;
5626 if (bias)
5627 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5628 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5629 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5630 t = fold_convert (TREE_TYPE (startvar), t);
5631 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5632 false, GSI_CONTINUE_LINKING);
5633 if (endvar)
5635 stmt = gimple_build_assign (endvar, iend);
5636 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5637 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5638 stmt = gimple_build_assign (fd->loop.v, iend);
5639 else
5640 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, iend,
5641 NULL_TREE);
5642 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5644 if (fd->collapse > 1)
5645 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5647 if (!broken_loop)
5649 /* Code to control the increment and predicate for the sequential
5650 loop goes in the CONT_BB. */
5651 gsi = gsi_last_bb (cont_bb);
5652 stmt = gsi_stmt (gsi);
5653 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5654 vmain = gimple_omp_continue_control_use (stmt);
5655 vback = gimple_omp_continue_control_def (stmt);
5657 if (!gimple_omp_for_combined_p (fd->for_stmt))
5659 if (POINTER_TYPE_P (type))
5660 t = fold_build_pointer_plus (vmain, fd->loop.step);
5661 else
5662 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5663 t = force_gimple_operand_gsi (&gsi, t,
5664 DECL_P (vback)
5665 && TREE_ADDRESSABLE (vback),
5666 NULL_TREE, true, GSI_SAME_STMT);
5667 stmt = gimple_build_assign (vback, t);
5668 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5670 t = build2 (fd->loop.cond_code, boolean_type_node,
5671 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5672 iend);
5673 stmt = gimple_build_cond_empty (t);
5674 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5677 /* Remove GIMPLE_OMP_CONTINUE. */
5678 gsi_remove (&gsi, true);
5680 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5681 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5683 /* Emit code to get the next parallel iteration in L2_BB. */
5684 gsi = gsi_start_bb (l2_bb);
5686 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5687 build_fold_addr_expr (istart0),
5688 build_fold_addr_expr (iend0));
5689 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5690 false, GSI_CONTINUE_LINKING);
5691 if (TREE_TYPE (t) != boolean_type_node)
5692 t = fold_build2 (NE_EXPR, boolean_type_node,
5693 t, build_int_cst (TREE_TYPE (t), 0));
5694 stmt = gimple_build_cond_empty (t);
5695 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5698 /* Add the loop cleanup function. */
5699 gsi = gsi_last_bb (exit_bb);
5700 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5701 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5702 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5703 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5704 else
5705 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5706 stmt = gimple_build_call (t, 0);
5707 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5708 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5709 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5710 gsi_remove (&gsi, true);
5712 /* Connect the new blocks. */
5713 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5714 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5716 if (!broken_loop)
5718 gimple_seq phis;
5720 e = find_edge (cont_bb, l3_bb);
5721 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5723 phis = phi_nodes (l3_bb);
5724 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5726 gimple phi = gsi_stmt (gsi);
5727 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5728 PHI_ARG_DEF_FROM_EDGE (phi, e));
5730 remove_edge (e);
5732 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5733 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5734 e = find_edge (cont_bb, l1_bb);
5735 if (gimple_omp_for_combined_p (fd->for_stmt))
5737 remove_edge (e);
5738 e = NULL;
5740 else if (fd->collapse > 1)
5742 remove_edge (e);
5743 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5745 else
5746 e->flags = EDGE_TRUE_VALUE;
5747 if (e)
5749 e->probability = REG_BR_PROB_BASE * 7 / 8;
5750 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5752 else
5754 e = find_edge (cont_bb, l2_bb);
5755 e->flags = EDGE_FALLTHRU;
5757 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5759 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5760 recompute_dominator (CDI_DOMINATORS, l2_bb));
5761 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5762 recompute_dominator (CDI_DOMINATORS, l3_bb));
5763 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5764 recompute_dominator (CDI_DOMINATORS, l0_bb));
5765 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5766 recompute_dominator (CDI_DOMINATORS, l1_bb));
5768 struct loop *outer_loop = alloc_loop ();
5769 outer_loop->header = l0_bb;
5770 outer_loop->latch = l2_bb;
5771 add_loop (outer_loop, l0_bb->loop_father);
5773 if (!gimple_omp_for_combined_p (fd->for_stmt))
5775 struct loop *loop = alloc_loop ();
5776 loop->header = l1_bb;
5777 /* The loop may have multiple latches. */
5778 add_loop (loop, outer_loop);
5784 /* A subroutine of expand_omp_for. Generate code for a parallel
5785 loop with static schedule and no specified chunk size. Given
5786 parameters:
5788 for (V = N1; V cond N2; V += STEP) BODY;
5790 where COND is "<" or ">", we generate pseudocode
5792 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5793 if (cond is <)
5794 adj = STEP - 1;
5795 else
5796 adj = STEP + 1;
5797 if ((__typeof (V)) -1 > 0 && cond is >)
5798 n = -(adj + N2 - N1) / -STEP;
5799 else
5800 n = (adj + N2 - N1) / STEP;
5801 q = n / nthreads;
5802 tt = n % nthreads;
5803 if (threadid < tt) goto L3; else goto L4;
5804 L3:
5805 tt = 0;
5806 q = q + 1;
5807 L4:
5808 s0 = q * threadid + tt;
5809 e0 = s0 + q;
5810 V = s0 * STEP + N1;
5811 if (s0 >= e0) goto L2; else goto L0;
5812 L0:
5813 e = e0 * STEP + N1;
5814 L1:
5815 BODY;
5816 V += STEP;
5817 if (V cond e) goto L1;
5818 L2:
5820 */
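/* Editor's illustration (a worked example, not part of the original
   sources): with N1 = 0, N2 = 10, STEP = 1, cond "<" and nthreads = 4,
   we get n = 10, q = 10 / 4 = 2 and tt = 10 % 4 = 2.  Threads 0 and 1
   satisfy threadid < tt, so each takes q + 1 = 3 iterations ([0,3) and
   [3,6)); threads 2 and 3 take q = 2 each ([6,8) and [8,10)).  Every
   iteration is thus assigned to exactly one thread.  */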
5821 static void
5822 expand_omp_for_static_nochunk (struct omp_region *region,
5823 struct omp_for_data *fd,
5824 gimple inner_stmt)
5826 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
5827 tree type, itype, vmain, vback;
5828 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
5829 basic_block body_bb, cont_bb, collapse_bb = NULL;
5830 basic_block fin_bb;
5831 gimple_stmt_iterator gsi;
5832 gimple stmt;
5833 edge ep;
5834 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
5835 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
5836 bool broken_loop = region->cont == NULL;
5837 tree *counts = NULL;
5838 tree n1, n2, step;
5840 itype = type = TREE_TYPE (fd->loop.v);
5841 if (POINTER_TYPE_P (type))
5842 itype = signed_type_for (type);
5844 entry_bb = region->entry;
5845 cont_bb = region->cont;
5846 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5847 fin_bb = BRANCH_EDGE (entry_bb)->dest;
5848 gcc_assert (broken_loop
5849 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
5850 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5851 body_bb = single_succ (seq_start_bb);
5852 if (!broken_loop)
5854 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5855 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5857 exit_bb = region->exit;
5859 /* Iteration space partitioning goes in ENTRY_BB. */
5860 gsi = gsi_last_bb (entry_bb);
5861 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5863 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
5865 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
5866 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
5869 if (fd->collapse > 1)
5871 int first_zero_iter = -1;
5872 basic_block l2_dom_bb = NULL;
5874 counts = XALLOCAVEC (tree, fd->collapse);
5875 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5876 fin_bb, first_zero_iter,
5877 l2_dom_bb);
5878 t = NULL_TREE;
5880 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
5881 t = integer_one_node;
5882 else
5883 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5884 fold_convert (type, fd->loop.n1),
5885 fold_convert (type, fd->loop.n2));
5886 if (fd->collapse == 1
5887 && TYPE_UNSIGNED (type)
5888 && (t == NULL_TREE || !integer_onep (t)))
5890 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5891 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
5892 true, GSI_SAME_STMT);
5893 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5894 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
5895 true, GSI_SAME_STMT);
5896 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5897 NULL_TREE, NULL_TREE);
5898 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5899 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5900 expand_omp_regimplify_p, NULL, NULL)
5901 || walk_tree (gimple_cond_rhs_ptr (stmt),
5902 expand_omp_regimplify_p, NULL, NULL))
5904 gsi = gsi_for_stmt (stmt);
5905 gimple_regimplify_operands (stmt, &gsi);
5907 ep = split_block (entry_bb, stmt);
5908 ep->flags = EDGE_TRUE_VALUE;
5909 entry_bb = ep->dest;
5910 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5911 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
5912 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
5913 if (gimple_in_ssa_p (cfun))
5915 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5916 for (gsi = gsi_start_phis (fin_bb);
5917 !gsi_end_p (gsi); gsi_next (&gsi))
5919 gimple phi = gsi_stmt (gsi);
5920 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5921 ep, UNKNOWN_LOCATION);
5924 gsi = gsi_last_bb (entry_bb);
5927 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
5928 t = fold_convert (itype, t);
5929 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5930 true, GSI_SAME_STMT);
5932 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
5933 t = fold_convert (itype, t);
5934 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5935 true, GSI_SAME_STMT);
5937 n1 = fd->loop.n1;
5938 n2 = fd->loop.n2;
5939 step = fd->loop.step;
5940 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5942 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5943 OMP_CLAUSE__LOOPTEMP_);
5944 gcc_assert (innerc);
5945 n1 = OMP_CLAUSE_DECL (innerc);
5946 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5947 OMP_CLAUSE__LOOPTEMP_);
5948 gcc_assert (innerc);
5949 n2 = OMP_CLAUSE_DECL (innerc);
5951 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
5952 true, NULL_TREE, true, GSI_SAME_STMT);
5953 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
5954 true, NULL_TREE, true, GSI_SAME_STMT);
5955 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
5956 true, NULL_TREE, true, GSI_SAME_STMT);
5958 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
5959 t = fold_build2 (PLUS_EXPR, itype, step, t);
5960 t = fold_build2 (PLUS_EXPR, itype, t, n2);
5961 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
5962 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5963 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5964 fold_build1 (NEGATE_EXPR, itype, t),
5965 fold_build1 (NEGATE_EXPR, itype, step));
5966 else
5967 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
5968 t = fold_convert (itype, t);
5969 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
5971 q = create_tmp_reg (itype, "q");
5972 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
5973 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5974 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
5976 tt = create_tmp_reg (itype, "tt");
5977 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
5978 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5979 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
5981 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
5982 stmt = gimple_build_cond_empty (t);
5983 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5985 second_bb = split_block (entry_bb, stmt)->dest;
5986 gsi = gsi_last_bb (second_bb);
5987 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5989 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
5990 GSI_SAME_STMT);
5991 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
5992 build_int_cst (itype, 1));
5993 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5995 third_bb = split_block (second_bb, stmt)->dest;
5996 gsi = gsi_last_bb (third_bb);
5997 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5999 t = build2 (MULT_EXPR, itype, q, threadid);
6000 t = build2 (PLUS_EXPR, itype, t, tt);
6001 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6003 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6004 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6006 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6007 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6009 /* Remove the GIMPLE_OMP_FOR statement. */
6010 gsi_remove (&gsi, true);
6012 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6013 gsi = gsi_start_bb (seq_start_bb);
6015 tree startvar = fd->loop.v;
6016 tree endvar = NULL_TREE;
6018 if (gimple_omp_for_combined_p (fd->for_stmt))
6020 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6021 ? gimple_omp_parallel_clauses (inner_stmt)
6022 : gimple_omp_for_clauses (inner_stmt);
6023 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6024 gcc_assert (innerc);
6025 startvar = OMP_CLAUSE_DECL (innerc);
6026 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6027 OMP_CLAUSE__LOOPTEMP_);
6028 gcc_assert (innerc);
6029 endvar = OMP_CLAUSE_DECL (innerc);
6031 t = fold_convert (itype, s0);
6032 t = fold_build2 (MULT_EXPR, itype, t, step);
6033 if (POINTER_TYPE_P (type))
6034 t = fold_build_pointer_plus (n1, t);
6035 else
6036 t = fold_build2 (PLUS_EXPR, type, t, n1);
6037 t = fold_convert (TREE_TYPE (startvar), t);
6038 t = force_gimple_operand_gsi (&gsi, t,
6039 DECL_P (startvar)
6040 && TREE_ADDRESSABLE (startvar),
6041 NULL_TREE, false, GSI_CONTINUE_LINKING);
6042 stmt = gimple_build_assign (startvar, t);
6043 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6045 t = fold_convert (itype, e0);
6046 t = fold_build2 (MULT_EXPR, itype, t, step);
6047 if (POINTER_TYPE_P (type))
6048 t = fold_build_pointer_plus (n1, t);
6049 else
6050 t = fold_build2 (PLUS_EXPR, type, t, n1);
6051 t = fold_convert (TREE_TYPE (startvar), t);
6052 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6053 false, GSI_CONTINUE_LINKING);
6054 if (endvar)
6056 stmt = gimple_build_assign (endvar, e);
6057 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6058 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6059 stmt = gimple_build_assign (fd->loop.v, e);
6060 else
6061 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6062 NULL_TREE);
6063 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6065 if (fd->collapse > 1)
6066 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6068 if (!broken_loop)
6070 /* The code controlling the sequential loop replaces the
6071 GIMPLE_OMP_CONTINUE. */
6072 gsi = gsi_last_bb (cont_bb);
6073 stmt = gsi_stmt (gsi);
6074 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6075 vmain = gimple_omp_continue_control_use (stmt);
6076 vback = gimple_omp_continue_control_def (stmt);
6078 if (!gimple_omp_for_combined_p (fd->for_stmt))
6080 if (POINTER_TYPE_P (type))
6081 t = fold_build_pointer_plus (vmain, step);
6082 else
6083 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6084 t = force_gimple_operand_gsi (&gsi, t,
6085 DECL_P (vback)
6086 && TREE_ADDRESSABLE (vback),
6087 NULL_TREE, true, GSI_SAME_STMT);
6088 stmt = gimple_build_assign (vback, t);
6089 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6091 t = build2 (fd->loop.cond_code, boolean_type_node,
6092 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6093 ? t : vback, e);
6094 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6097 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6098 gsi_remove (&gsi, true);
6100 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6101 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6104 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6105 gsi = gsi_last_bb (exit_bb);
6106 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6108 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6109 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6111 gsi_remove (&gsi, true);
6113 /* Connect all the blocks. */
6114 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6115 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6116 ep = find_edge (entry_bb, second_bb);
6117 ep->flags = EDGE_TRUE_VALUE;
6118 ep->probability = REG_BR_PROB_BASE / 4;
6119 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6120 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6122 if (!broken_loop)
6124 ep = find_edge (cont_bb, body_bb);
6125 if (gimple_omp_for_combined_p (fd->for_stmt))
6127 remove_edge (ep);
6128 ep = NULL;
6130 else if (fd->collapse > 1)
6132 remove_edge (ep);
6133 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6135 else
6136 ep->flags = EDGE_TRUE_VALUE;
6137 find_edge (cont_bb, fin_bb)->flags
6138 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6141 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6142 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6143 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6145 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6146 recompute_dominator (CDI_DOMINATORS, body_bb));
6147 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6148 recompute_dominator (CDI_DOMINATORS, fin_bb));
6150 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6152 struct loop *loop = alloc_loop ();
6153 loop->header = body_bb;
6154 if (collapse_bb == NULL)
6155 loop->latch = cont_bb;
6156 add_loop (loop, body_bb->loop_father);
6161 /* A subroutine of expand_omp_for. Generate code for a parallel
6162 loop with static schedule and a specified chunk size. Given
6163 parameters:
6165 for (V = N1; V cond N2; V += STEP) BODY;
6167 where COND is "<" or ">", we generate pseudocode
6169 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6170 if (cond is <)
6171 adj = STEP - 1;
6172 else
6173 adj = STEP + 1;
6174 if ((__typeof (V)) -1 > 0 && cond is >)
6175 n = -(adj + N2 - N1) / -STEP;
6176 else
6177 n = (adj + N2 - N1) / STEP;
6178 trip = 0;
6179 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6180 here so that V is defined
6181 if the loop is not entered
6182 L0:
6183 s0 = (trip * nthreads + threadid) * CHUNK;
6184 e0 = min(s0 + CHUNK, n);
6185 if (s0 < n) goto L1; else goto L4;
6186 L1:
6187 V = s0 * STEP + N1;
6188 e = e0 * STEP + N1;
6189 L2:
6190 BODY;
6191 V += STEP;
6192 if (V cond e) goto L2; else goto L3;
6193 L3:
6194 trip += 1;
6195 goto L0;
6196 L4:
6198 */
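/* Editor's illustration (a worked example, not part of the original
   sources): with N1 = 0, N2 = 8, STEP = 1, CHUNK = 3 and nthreads = 2,
   trip 0 gives thread 0 the chunk s0 = 0, e0 = min (3, 8) = 3, i.e.
   [0,3), and thread 1 the chunk [3,6); on trip 1, thread 0 gets
   s0 = (1*2+0)*3 = 6, e0 = min (9, 8) = 8, i.e. [6,8), while thread 1
   computes s0 = 9 >= n = 8 and falls through to L4.  Chunks are dealt
   out round-robin until the iteration space is exhausted.  */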
6199 static void
6200 expand_omp_for_static_chunk (struct omp_region *region,
6201 struct omp_for_data *fd, gimple inner_stmt)
6203 tree n, s0, e0, e, t;
6204 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6205 tree type, itype, vmain, vback, vextra;
6206 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6207 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6208 gimple_stmt_iterator gsi;
6209 gimple stmt;
6210 edge se;
6211 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6212 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6213 bool broken_loop = region->cont == NULL;
6214 tree *counts = NULL;
6215 tree n1, n2, step;
6217 itype = type = TREE_TYPE (fd->loop.v);
6218 if (POINTER_TYPE_P (type))
6219 itype = signed_type_for (type);
6221 entry_bb = region->entry;
6222 se = split_block (entry_bb, last_stmt (entry_bb));
6223 entry_bb = se->src;
6224 iter_part_bb = se->dest;
6225 cont_bb = region->cont;
6226 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6227 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6228 gcc_assert (broken_loop
6229 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6230 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6231 body_bb = single_succ (seq_start_bb);
6232 if (!broken_loop)
6234 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6235 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6236 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6238 exit_bb = region->exit;
6240 /* Trip and adjustment setup goes in ENTRY_BB. */
6241 gsi = gsi_last_bb (entry_bb);
6242 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6244 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6246 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6247 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6250 if (fd->collapse > 1)
6252 int first_zero_iter = -1;
6253 basic_block l2_dom_bb = NULL;
6255 counts = XALLOCAVEC (tree, fd->collapse);
6256 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6257 fin_bb, first_zero_iter,
6258 l2_dom_bb);
6259 t = NULL_TREE;
6261 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6262 t = integer_one_node;
6263 else
6264 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6265 fold_convert (type, fd->loop.n1),
6266 fold_convert (type, fd->loop.n2));
6267 if (fd->collapse == 1
6268 && TYPE_UNSIGNED (type)
6269 && (t == NULL_TREE || !integer_onep (t)))
6271 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6272 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6273 true, GSI_SAME_STMT);
6274 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6275 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6276 true, GSI_SAME_STMT);
6277 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6278 NULL_TREE, NULL_TREE);
6279 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6280 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6281 expand_omp_regimplify_p, NULL, NULL)
6282 || walk_tree (gimple_cond_rhs_ptr (stmt),
6283 expand_omp_regimplify_p, NULL, NULL))
6285 gsi = gsi_for_stmt (stmt);
6286 gimple_regimplify_operands (stmt, &gsi);
6288 se = split_block (entry_bb, stmt);
6289 se->flags = EDGE_TRUE_VALUE;
6290 entry_bb = se->dest;
6291 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6292 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6293 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6294 if (gimple_in_ssa_p (cfun))
6296 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6297 for (gsi = gsi_start_phis (fin_bb);
6298 !gsi_end_p (gsi); gsi_next (&gsi))
6300 gimple phi = gsi_stmt (gsi);
6301 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6302 se, UNKNOWN_LOCATION);
6305 gsi = gsi_last_bb (entry_bb);
6308 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6309 t = fold_convert (itype, t);
6310 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6311 true, GSI_SAME_STMT);
6313 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6314 t = fold_convert (itype, t);
6315 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6316 true, GSI_SAME_STMT);
6318 n1 = fd->loop.n1;
6319 n2 = fd->loop.n2;
6320 step = fd->loop.step;
6321 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6323 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6324 OMP_CLAUSE__LOOPTEMP_);
6325 gcc_assert (innerc);
6326 n1 = OMP_CLAUSE_DECL (innerc);
6327 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6328 OMP_CLAUSE__LOOPTEMP_);
6329 gcc_assert (innerc);
6330 n2 = OMP_CLAUSE_DECL (innerc);
6332 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6333 true, NULL_TREE, true, GSI_SAME_STMT);
6334 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6335 true, NULL_TREE, true, GSI_SAME_STMT);
6336 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6337 true, NULL_TREE, true, GSI_SAME_STMT);
6338 fd->chunk_size
6339 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
6340 true, NULL_TREE, true, GSI_SAME_STMT);
6342 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6343 t = fold_build2 (PLUS_EXPR, itype, step, t);
6344 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6345 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6346 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6347 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6348 fold_build1 (NEGATE_EXPR, itype, t),
6349 fold_build1 (NEGATE_EXPR, itype, step));
6350 else
6351 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6352 t = fold_convert (itype, t);
6353 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6354 true, GSI_SAME_STMT);
6356 trip_var = create_tmp_reg (itype, ".trip");
6357 if (gimple_in_ssa_p (cfun))
6359 trip_init = make_ssa_name (trip_var, NULL);
6360 trip_main = make_ssa_name (trip_var, NULL);
6361 trip_back = make_ssa_name (trip_var, NULL);
6363 else
6365 trip_init = trip_var;
6366 trip_main = trip_var;
6367 trip_back = trip_var;
6370 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
6371 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6373 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6374 t = fold_build2 (MULT_EXPR, itype, t, step);
6375 if (POINTER_TYPE_P (type))
6376 t = fold_build_pointer_plus (n1, t);
6377 else
6378 t = fold_build2 (PLUS_EXPR, type, t, n1);
6379 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6380 true, GSI_SAME_STMT);
6382 /* Remove the GIMPLE_OMP_FOR. */
6383 gsi_remove (&gsi, true);
6385 /* Iteration space partitioning goes in ITER_PART_BB. */
6386 gsi = gsi_last_bb (iter_part_bb);
6388 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6389 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6390 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6391 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6392 false, GSI_CONTINUE_LINKING);
6394 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6395 t = fold_build2 (MIN_EXPR, itype, t, n);
6396 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6397 false, GSI_CONTINUE_LINKING);
6399 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6400 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6402 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6403 gsi = gsi_start_bb (seq_start_bb);
6405 tree startvar = fd->loop.v;
6406 tree endvar = NULL_TREE;
6408 if (gimple_omp_for_combined_p (fd->for_stmt))
6410 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6411 ? gimple_omp_parallel_clauses (inner_stmt)
6412 : gimple_omp_for_clauses (inner_stmt);
6413 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6414 gcc_assert (innerc);
6415 startvar = OMP_CLAUSE_DECL (innerc);
6416 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6417 OMP_CLAUSE__LOOPTEMP_);
6418 gcc_assert (innerc);
6419 endvar = OMP_CLAUSE_DECL (innerc);
6422 t = fold_convert (itype, s0);
6423 t = fold_build2 (MULT_EXPR, itype, t, step);
6424 if (POINTER_TYPE_P (type))
6425 t = fold_build_pointer_plus (n1, t);
6426 else
6427 t = fold_build2 (PLUS_EXPR, type, t, n1);
6428 t = fold_convert (TREE_TYPE (startvar), t);
6429 t = force_gimple_operand_gsi (&gsi, t,
6430 DECL_P (startvar)
6431 && TREE_ADDRESSABLE (startvar),
6432 NULL_TREE, false, GSI_CONTINUE_LINKING);
6433 stmt = gimple_build_assign (startvar, t);
6434 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6436 t = fold_convert (itype, e0);
6437 t = fold_build2 (MULT_EXPR, itype, t, step);
6438 if (POINTER_TYPE_P (type))
6439 t = fold_build_pointer_plus (n1, t);
6440 else
6441 t = fold_build2 (PLUS_EXPR, type, t, n1);
6442 t = fold_convert (TREE_TYPE (startvar), t);
6443 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6444 false, GSI_CONTINUE_LINKING);
6445 if (endvar)
6447 stmt = gimple_build_assign (endvar, e);
6448 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6449 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6450 stmt = gimple_build_assign (fd->loop.v, e);
6451 else
6452 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6453 NULL_TREE);
6454 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6456 if (fd->collapse > 1)
6457 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6459 if (!broken_loop)
6461 /* The code controlling the sequential loop goes in CONT_BB,
6462 replacing the GIMPLE_OMP_CONTINUE. */
6463 gsi = gsi_last_bb (cont_bb);
6464 stmt = gsi_stmt (gsi);
6465 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6466 vmain = gimple_omp_continue_control_use (stmt);
6467 vback = gimple_omp_continue_control_def (stmt);
6469 if (!gimple_omp_for_combined_p (fd->for_stmt))
6471 if (POINTER_TYPE_P (type))
6472 t = fold_build_pointer_plus (vmain, step);
6473 else
6474 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6475 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
6476 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6477 true, GSI_SAME_STMT);
6478 stmt = gimple_build_assign (vback, t);
6479 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6481 t = build2 (fd->loop.cond_code, boolean_type_node,
6482 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6483 ? t : vback, e);
6484 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6487 /* Remove GIMPLE_OMP_CONTINUE. */
6488 gsi_remove (&gsi, true);
6490 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6491 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6493 /* Trip update code goes into TRIP_UPDATE_BB. */
6494 gsi = gsi_start_bb (trip_update_bb);
6496 t = build_int_cst (itype, 1);
6497 t = build2 (PLUS_EXPR, itype, trip_main, t);
6498 stmt = gimple_build_assign (trip_back, t);
6499 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6502 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6503 gsi = gsi_last_bb (exit_bb);
6504 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6506 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6507 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6509 gsi_remove (&gsi, true);
6511 /* Connect the new blocks. */
6512 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6513 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6515 if (!broken_loop)
6517 se = find_edge (cont_bb, body_bb);
6518 if (gimple_omp_for_combined_p (fd->for_stmt))
6520 remove_edge (se);
6521 se = NULL;
6523 else if (fd->collapse > 1)
6525 remove_edge (se);
6526 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6528 else
6529 se->flags = EDGE_TRUE_VALUE;
6530 find_edge (cont_bb, trip_update_bb)->flags
6531 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6533 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6536 if (gimple_in_ssa_p (cfun))
6538 gimple_stmt_iterator psi;
6539 gimple phi;
6540 edge re, ene;
6541 edge_var_map *vm;
6542 size_t i;
6544 gcc_assert (fd->collapse == 1 && !broken_loop);
6546 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6547 remove arguments of the phi nodes in fin_bb. We need to create
6548 appropriate phi nodes in iter_part_bb instead. */
6549 se = single_pred_edge (fin_bb);
6550 re = single_succ_edge (trip_update_bb);
6551 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
6552 ene = single_succ_edge (entry_bb);
6554 psi = gsi_start_phis (fin_bb);
6555 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6556 gsi_next (&psi), ++i)
6558 gimple nphi;
6559 source_location locus;
6561 phi = gsi_stmt (psi);
6562 t = gimple_phi_result (phi);
6563 gcc_assert (t == redirect_edge_var_map_result (vm));
6564 nphi = create_phi_node (t, iter_part_bb);
6566 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6567 locus = gimple_phi_arg_location_from_edge (phi, se);
6569 /* A special case -- fd->loop.v is not yet computed in
6570 iter_part_bb, so we need to use vextra instead. */
6571 if (t == fd->loop.v)
6572 t = vextra;
6573 add_phi_arg (nphi, t, ene, locus);
6574 locus = redirect_edge_var_map_location (vm);
6575 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6577 gcc_assert (!gsi_end_p (psi) && i == head->length ());
6578 redirect_edge_var_map_clear (re);
6579 while (1)
6581 psi = gsi_start_phis (fin_bb);
6582 if (gsi_end_p (psi))
6583 break;
6584 remove_phi_node (&psi, false);
6587 /* Make phi node for trip. */
6588 phi = create_phi_node (trip_main, iter_part_bb);
6589 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6590 UNKNOWN_LOCATION);
6591 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6592 UNKNOWN_LOCATION);
6595 if (!broken_loop)
6596 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6597 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6598 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6599 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6600 recompute_dominator (CDI_DOMINATORS, fin_bb));
6601 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6602 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6603 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6604 recompute_dominator (CDI_DOMINATORS, body_bb));
6606 if (!broken_loop)
6608 struct loop *trip_loop = alloc_loop ();
6609 trip_loop->header = iter_part_bb;
6610 trip_loop->latch = trip_update_bb;
6611 add_loop (trip_loop, iter_part_bb->loop_father);
6613 if (!gimple_omp_for_combined_p (fd->for_stmt))
6615 struct loop *loop = alloc_loop ();
6616 loop->header = body_bb;
6617 if (collapse_bb == NULL)
6618 loop->latch = cont_bb;
6619 add_loop (loop, trip_loop);
6625 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
6626 loop. Given parameters:
6628 for (V = N1; V cond N2; V += STEP) BODY;
6630 where COND is "<" or ">", we generate pseudocode
6632 V = N1;
6633 goto L1;
6634 L0:
6635 BODY;
6636 V += STEP;
6637 L1:
6638 if (V cond N2) goto L0; else goto L2;
6639 L2:
6641 For collapsed loops, given parameters:
6642 collapse(3)
6643 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6644 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6645 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6646 BODY;
6648 we generate pseudocode
6650 if (cond3 is <)
6651 adj = STEP3 - 1;
6652 else
6653 adj = STEP3 + 1;
6654 count3 = (adj + N32 - N31) / STEP3;
6655 if (cond2 is <)
6656 adj = STEP2 - 1;
6657 else
6658 adj = STEP2 + 1;
6659 count2 = (adj + N22 - N21) / STEP2;
6660 if (cond1 is <)
6661 adj = STEP1 - 1;
6662 else
6663 adj = STEP1 + 1;
6664 count1 = (adj + N12 - N11) / STEP1;
6665 count = count1 * count2 * count3;
6666 V = 0;
6667 V1 = N11;
6668 V2 = N21;
6669 V3 = N31;
6670 goto L1;
6671 L0:
6672 BODY;
6673 V += 1;
6674 V3 += STEP3;
6675 V2 += (V3 cond3 N32) ? 0 : STEP2;
6676 V3 = (V3 cond3 N32) ? V3 : N31;
6677 V1 += (V2 cond2 N22) ? 0 : STEP1;
6678 V2 = (V2 cond2 N22) ? V2 : N21;
6679 L1:
6680 if (V < count) goto L0; else goto L2;
6681 L2:
6683 */
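/* Editor's illustration (not part of the original sources): for
   collapse(3) with count1 = count2 = count3 = 2, count = 8 and the single
   linear counter V runs 0..7.  After each body, V3 steps; once V3 fails
   cond3 it is reset to N31 and V2 steps instead, and likewise V1 steps
   when V2 wraps -- the per-dimension variables behave like the digits of
   an odometer driven by V.  */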
6685 static void
6686 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
6688 tree type, t;
6689 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
6690 gimple_stmt_iterator gsi;
6691 gimple stmt;
6692 bool broken_loop = region->cont == NULL;
6693 edge e, ne;
6694 tree *counts = NULL;
6695 int i;
6696 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6697 OMP_CLAUSE_SAFELEN);
6698 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6699 OMP_CLAUSE__SIMDUID_);
6700 tree n1, n2;
6702 type = TREE_TYPE (fd->loop.v);
6703 entry_bb = region->entry;
6704 cont_bb = region->cont;
6705 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6706 gcc_assert (broken_loop
6707 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6708 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6709 if (!broken_loop)
6711 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6712 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6713 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6714 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6716 else
6718 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6719 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6720 l2_bb = single_succ (l1_bb);
6722 exit_bb = region->exit;
6723 l2_dom_bb = NULL;
6725 gsi = gsi_last_bb (entry_bb);
6727 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6728 /* Not needed in SSA form right now. */
6729 gcc_assert (!gimple_in_ssa_p (cfun));
6730 if (fd->collapse > 1)
6732 int first_zero_iter = -1;
6733 basic_block zero_iter_bb = l2_bb;
6735 counts = XALLOCAVEC (tree, fd->collapse);
6736 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6737 zero_iter_bb, first_zero_iter,
6738 l2_dom_bb);
6740 if (l2_dom_bb == NULL)
6741 l2_dom_bb = l1_bb;
6743 n1 = fd->loop.n1;
6744 n2 = fd->loop.n2;
6745 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6747 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6748 OMP_CLAUSE__LOOPTEMP_);
6749 gcc_assert (innerc);
6750 n1 = OMP_CLAUSE_DECL (innerc);
6751 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6752 OMP_CLAUSE__LOOPTEMP_);
6753 gcc_assert (innerc);
6754 n2 = OMP_CLAUSE_DECL (innerc);
6755 expand_omp_build_assign (&gsi, fd->loop.v,
6756 fold_convert (type, n1));
6757 if (fd->collapse > 1)
6759 gsi_prev (&gsi);
6760 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
6761 gsi_next (&gsi);
6764 else
6766 expand_omp_build_assign (&gsi, fd->loop.v,
6767 fold_convert (type, fd->loop.n1));
6768 if (fd->collapse > 1)
6769 for (i = 0; i < fd->collapse; i++)
6771 tree itype = TREE_TYPE (fd->loops[i].v);
6772 if (POINTER_TYPE_P (itype))
6773 itype = signed_type_for (itype);
6774 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
6775 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6779 /* Remove the GIMPLE_OMP_FOR statement. */
6780 gsi_remove (&gsi, true);
6782 if (!broken_loop)
6784 /* Code to control the increment goes in the CONT_BB. */
6785 gsi = gsi_last_bb (cont_bb);
6786 stmt = gsi_stmt (gsi);
6787 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6789 if (POINTER_TYPE_P (type))
6790 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
6791 else
6792 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
6793 expand_omp_build_assign (&gsi, fd->loop.v, t);
6795 if (fd->collapse > 1)
6797 i = fd->collapse - 1;
6798 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
6800 t = fold_convert (sizetype, fd->loops[i].step);
6801 t = fold_build_pointer_plus (fd->loops[i].v, t);
6803 else
6805 t = fold_convert (TREE_TYPE (fd->loops[i].v),
6806 fd->loops[i].step);
6807 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
6808 fd->loops[i].v, t);
6810 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6812 for (i = fd->collapse - 1; i > 0; i--)
6814 tree itype = TREE_TYPE (fd->loops[i].v);
6815 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
6816 if (POINTER_TYPE_P (itype2))
6817 itype2 = signed_type_for (itype2);
6818 t = build3 (COND_EXPR, itype2,
6819 build2 (fd->loops[i].cond_code, boolean_type_node,
6820 fd->loops[i].v,
6821 fold_convert (itype, fd->loops[i].n2)),
6822 build_int_cst (itype2, 0),
6823 fold_convert (itype2, fd->loops[i - 1].step));
6824 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
6825 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
6826 else
6827 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
6828 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
6830 t = build3 (COND_EXPR, itype,
6831 build2 (fd->loops[i].cond_code, boolean_type_node,
6832 fd->loops[i].v,
6833 fold_convert (itype, fd->loops[i].n2)),
6834 fd->loops[i].v,
6835 fold_convert (itype, fd->loops[i].n1));
6836 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6840 /* Remove GIMPLE_OMP_CONTINUE. */
6841 gsi_remove (&gsi, true);
6844 /* Emit the condition in L1_BB. */
6845 gsi = gsi_start_bb (l1_bb);
6847 t = fold_convert (type, n2);
6848 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6849 false, GSI_CONTINUE_LINKING);
6850 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
6851 stmt = gimple_build_cond_empty (t);
6852 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6853 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
6854 NULL, NULL)
6855 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
6856 NULL, NULL))
6858 gsi = gsi_for_stmt (stmt);
6859 gimple_regimplify_operands (stmt, &gsi);
6862 /* Remove GIMPLE_OMP_RETURN. */
6863 gsi = gsi_last_bb (exit_bb);
6864 gsi_remove (&gsi, true);
6866 /* Connect the new blocks. */
6867 remove_edge (FALLTHRU_EDGE (entry_bb));
6869 if (!broken_loop)
6871 remove_edge (BRANCH_EDGE (entry_bb));
6872 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6874 e = BRANCH_EDGE (l1_bb);
6875 ne = FALLTHRU_EDGE (l1_bb);
6876 e->flags = EDGE_TRUE_VALUE;
6878 else
6880 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6882 ne = single_succ_edge (l1_bb);
6883 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6886 ne->flags = EDGE_FALSE_VALUE;
6887 e->probability = REG_BR_PROB_BASE * 7 / 8;
6888 ne->probability = REG_BR_PROB_BASE / 8;
6890 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6891 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6892 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6894 if (!broken_loop)
6896 struct loop *loop = alloc_loop ();
6897 loop->header = l1_bb;
6898 loop->latch = cont_bb;
6899 add_loop (loop, l1_bb->loop_father);
6900 if (safelen == NULL_TREE)
6901 loop->safelen = INT_MAX;
6902 else
6904 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
6905 if (TREE_CODE (safelen) != INTEGER_CST)
6906 loop->safelen = 0;
6907 else if (!tree_fits_uhwi_p (safelen)
6908 || tree_to_uhwi (safelen) > INT_MAX)
6909 loop->safelen = INT_MAX;
6910 else
6911 loop->safelen = tree_to_uhwi (safelen);
6912 if (loop->safelen == 1)
6913 loop->safelen = 0;
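/* Editor's note: e.g. "#pragma omp simd safelen(8)" yields
   loop->safelen == 8; an absent clause means no vector length limit
   (INT_MAX), and safelen(1) is canonicalized to 0 just above, since a
   limit of one permits no simultaneous iterations at all.  */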
6915 if (simduid)
6917 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
6918 cfun->has_simduid_loops = true;
6920 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
6921 the loop. */
6922 if ((flag_tree_loop_vectorize
6923 || (!global_options_set.x_flag_tree_loop_vectorize
6924 && !global_options_set.x_flag_tree_vectorize))
6925 && flag_tree_loop_optimize
6926 && loop->safelen > 1)
6928 loop->force_vectorize = true;
6929 cfun->has_force_vectorize_loops = true;
6935 /* Expand the OpenMP loop defined by REGION. */
6937 static void
6938 expand_omp_for (struct omp_region *region, gimple inner_stmt)
6940 struct omp_for_data fd;
6941 struct omp_for_data_loop *loops;
6943 loops
6944 = (struct omp_for_data_loop *)
6945 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
6946 * sizeof (struct omp_for_data_loop));
6947 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
6948 region->sched_kind = fd.sched_kind;
6950 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
6951 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6952 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6953 if (region->cont)
6955 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
6956 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6957 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6959 else
6960 /* If there isn't a continue then this is a degenerate case where
6961 the introduction of abnormal edges during lowering will prevent
6962 original loops from being detected. Fix that up. */
6963 loops_state_set (LOOPS_NEED_FIXUP);
6965 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
6966 expand_omp_simd (region, &fd);
6967 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
6968 && !fd.have_ordered)
6970 if (fd.chunk_size == NULL)
6971 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
6972 else
6973 expand_omp_for_static_chunk (region, &fd, inner_stmt);
6975 else
6977 int fn_index, start_ix, next_ix;
6979 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
6980 == GF_OMP_FOR_KIND_FOR);
6981 if (fd.chunk_size == NULL
6982 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
6983 fd.chunk_size = integer_zero_node;
6984 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
6985 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
6986 ? 3 : fd.sched_kind;
6987 fn_index += fd.have_ordered * 4;
6988 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
6989 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
6990 if (fd.iter_type == long_long_unsigned_type_node)
6992 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
6993 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
6994 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
6995 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
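/* Editor's note: the offset arithmetic above relies on the GOMP loop
   builtins being declared contiguously per schedule (static, dynamic,
   guided, runtime, then the ordered_* variants, with a parallel ULL
   block).  For example, schedule(dynamic) without ordered gives
   fn_index == 1, selecting BUILT_IN_GOMP_LOOP_DYNAMIC_START and
   BUILT_IN_GOMP_LOOP_DYNAMIC_NEXT; adding ordered shifts both by 4 to
   the ordered variants.  */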
6997 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
6998 (enum built_in_function) next_ix, inner_stmt);
7001 if (gimple_in_ssa_p (cfun))
7002 update_ssa (TODO_update_ssa_only_virtuals);
7006 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7008 v = GOMP_sections_start (n);
7009 L0:
7010 switch (v)
7011 {
7012 case 0:
7013 goto L2;
7014 case 1:
7015 section 1;
7016 goto L1;
7017 case 2:
7018 ...
7019 case n:
7020 ...
7021 default:
7022 abort ();
7023 }
7024 L1:
7025 v = GOMP_sections_next ();
7026 goto L0;
7027 L2:
7028 reduction;
7030 If this is a combined parallel sections, replace the call to
7031 GOMP_sections_start with a call to GOMP_sections_next. */
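/* Editor's illustration (assumed user code, not from the original file):

     #pragma omp sections
     {
       #pragma omp section
       foo ();
       #pragma omp section
       bar ();
     }

   lowers to the dispatch loop above with v = GOMP_sections_start (2):
   v == 1 runs foo (), v == 2 runs bar (), and v == 0 leaves the loop;
   each thread keeps calling GOMP_sections_next () until it returns 0.  */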
7033 static void
7034 expand_omp_sections (struct omp_region *region)
7036 tree t, u, vin = NULL, vmain, vnext, l2;
7037 unsigned len;
7038 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7039 gimple_stmt_iterator si, switch_si;
7040 gimple sections_stmt, stmt, cont;
7041 edge_iterator ei;
7042 edge e;
7043 struct omp_region *inner;
7044 unsigned i, casei;
7045 bool exit_reachable = region->cont != NULL;
7047 gcc_assert (region->exit != NULL);
7048 entry_bb = region->entry;
7049 l0_bb = single_succ (entry_bb);
7050 l1_bb = region->cont;
7051 l2_bb = region->exit;
7052 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7053 l2 = gimple_block_label (l2_bb);
7054 else
7056 /* This can happen if there are reductions. */
7057 len = EDGE_COUNT (l0_bb->succs);
7058 gcc_assert (len > 0);
7059 e = EDGE_SUCC (l0_bb, len - 1);
7060 si = gsi_last_bb (e->dest);
7061 l2 = NULL_TREE;
7062 if (gsi_end_p (si)
7063 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7064 l2 = gimple_block_label (e->dest);
7065 else
7066 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7068 si = gsi_last_bb (e->dest);
7069 if (gsi_end_p (si)
7070 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7072 l2 = gimple_block_label (e->dest);
7073 break;
7077 if (exit_reachable)
7078 default_bb = create_empty_bb (l1_bb->prev_bb);
7079 else
7080 default_bb = create_empty_bb (l0_bb);
7082 /* We will build a switch() with enough cases for all the
7083 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work
7084 remains, and a default case to abort if something goes wrong. */
7085 len = EDGE_COUNT (l0_bb->succs);
7087 /* Use vec::quick_push on label_vec throughout, since we know the size
7088 in advance. */
7089 auto_vec<tree> label_vec (len);
7091 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7092 GIMPLE_OMP_SECTIONS statement. */
7093 si = gsi_last_bb (entry_bb);
7094 sections_stmt = gsi_stmt (si);
7095 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7096 vin = gimple_omp_sections_control (sections_stmt);
7097 if (!is_combined_parallel (region))
7099 /* If we are not inside a combined parallel+sections region,
7100 call GOMP_sections_start. */
7101 t = build_int_cst (unsigned_type_node, len - 1);
7102 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7103 stmt = gimple_build_call (u, 1, t);
7105 else
7107 /* Otherwise, call GOMP_sections_next. */
7108 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7109 stmt = gimple_build_call (u, 0);
7111 gimple_call_set_lhs (stmt, vin);
7112 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7113 gsi_remove (&si, true);
7115 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7116 L0_BB. */
7117 switch_si = gsi_last_bb (l0_bb);
7118 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7119 if (exit_reachable)
7121 cont = last_stmt (l1_bb);
7122 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7123 vmain = gimple_omp_continue_control_use (cont);
7124 vnext = gimple_omp_continue_control_def (cont);
7126 else
7128 vmain = vin;
7129 vnext = NULL_TREE;
7132 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7133 label_vec.quick_push (t);
7134 i = 1;
7136 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7137 for (inner = region->inner, casei = 1;
7138 inner;
7139 inner = inner->next, i++, casei++)
7141 basic_block s_entry_bb, s_exit_bb;
7143 /* Skip optional reduction region. */
7144 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7146 --i;
7147 --casei;
7148 continue;
7151 s_entry_bb = inner->entry;
7152 s_exit_bb = inner->exit;
7154 t = gimple_block_label (s_entry_bb);
7155 u = build_int_cst (unsigned_type_node, casei);
7156 u = build_case_label (u, NULL, t);
7157 label_vec.quick_push (u);
7159 si = gsi_last_bb (s_entry_bb);
7160 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7161 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7162 gsi_remove (&si, true);
7163 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7165 if (s_exit_bb == NULL)
7166 continue;
7168 si = gsi_last_bb (s_exit_bb);
7169 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7170 gsi_remove (&si, true);
7172 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7175 /* Error handling code goes in DEFAULT_BB. */
7176 t = gimple_block_label (default_bb);
7177 u = build_case_label (NULL, NULL, t);
7178 make_edge (l0_bb, default_bb, 0);
7179 add_bb_to_loop (default_bb, current_loops->tree_root);
7181 stmt = gimple_build_switch (vmain, u, label_vec);
7182 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7183 gsi_remove (&switch_si, true);
7185 si = gsi_start_bb (default_bb);
7186 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7187 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7189 if (exit_reachable)
7191 tree bfn_decl;
7193 /* Code to get the next section goes in L1_BB. */
7194 si = gsi_last_bb (l1_bb);
7195 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7197 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7198 stmt = gimple_build_call (bfn_decl, 0);
7199 gimple_call_set_lhs (stmt, vnext);
7200 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7201 gsi_remove (&si, true);
7203 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7206 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7207 si = gsi_last_bb (l2_bb);
7208 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7209 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7210 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7211 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7212 else
7213 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7214 stmt = gimple_build_call (t, 0);
7215 if (gimple_omp_return_lhs (gsi_stmt (si)))
7216 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7217 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7218 gsi_remove (&si, true);
7220 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7224 /* Expand code for an OpenMP single directive. We've already expanded
7225 much of the code; here we simply place the GOMP_barrier call. */
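/* Editor's note: in practice this means "#pragma omp single" without a
   nowait clause keeps only the implicit barrier at region exit (the
   cancellable barrier variant when its result is used), while the nowait
   form reduces to plain fall-through.  */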
7227 static void
7228 expand_omp_single (struct omp_region *region)
7230 basic_block entry_bb, exit_bb;
7231 gimple_stmt_iterator si;
7233 entry_bb = region->entry;
7234 exit_bb = region->exit;
7236 si = gsi_last_bb (entry_bb);
7237 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7238 gsi_remove (&si, true);
7239 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7241 si = gsi_last_bb (exit_bb);
7242 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7244 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7245 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7247 gsi_remove (&si, true);
7248 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7252 /* Generic expansion for OpenMP synchronization directives: master,
7253 ordered and critical. All we need to do here is remove the entry
7254 and exit markers for REGION. */
7256 static void
7257 expand_omp_synch (struct omp_region *region)
7259 basic_block entry_bb, exit_bb;
7260 gimple_stmt_iterator si;
7262 entry_bb = region->entry;
7263 exit_bb = region->exit;
7265 si = gsi_last_bb (entry_bb);
7266 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7267 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7268 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7269 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7270 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7271 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7272 gsi_remove (&si, true);
7273 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7275 if (exit_bb)
7277 si = gsi_last_bb (exit_bb);
7278 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7279 gsi_remove (&si, true);
7280 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7284 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7285 operation as a normal volatile load. */
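/* Editor's illustration (assumed user code, not from the original file):
   "#pragma omp atomic read" over "v = x;" with a 4-byte x becomes roughly
   v = __atomic_load_4 (&x, MEMMODEL_RELAXED);
   with MEMMODEL_SEQ_CST instead when the seq_cst clause is present.  */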
7287 static bool
7288 expand_omp_atomic_load (basic_block load_bb, tree addr,
7289 tree loaded_val, int index)
7291 enum built_in_function tmpbase;
7292 gimple_stmt_iterator gsi;
7293 basic_block store_bb;
7294 location_t loc;
7295 gimple stmt;
7296 tree decl, call, type, itype;
7298 gsi = gsi_last_bb (load_bb);
7299 stmt = gsi_stmt (gsi);
7300 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7301 loc = gimple_location (stmt);
7303 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7304 is smaller than word size, then expand_atomic_load assumes that the load
7305 is atomic. We could avoid the builtin entirely in this case. */
7307 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7308 decl = builtin_decl_explicit (tmpbase);
7309 if (decl == NULL_TREE)
7310 return false;
7312 type = TREE_TYPE (loaded_val);
7313 itype = TREE_TYPE (TREE_TYPE (decl));
7315 call = build_call_expr_loc (loc, decl, 2, addr,
7316 build_int_cst (NULL,
7317 gimple_omp_atomic_seq_cst_p (stmt)
7318 ? MEMMODEL_SEQ_CST
7319 : MEMMODEL_RELAXED));
7320 if (!useless_type_conversion_p (type, itype))
7321 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7322 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7324 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7325 gsi_remove (&gsi, true);
7327 store_bb = single_succ (load_bb);
7328 gsi = gsi_last_bb (store_bb);
7329 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7330 gsi_remove (&gsi, true);
7332 if (gimple_in_ssa_p (cfun))
7333 update_ssa (TODO_update_ssa_no_phi);
7335 return true;
7338 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7339 operation as a normal volatile store. */
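/* Editor's illustration (assumed user code, not from the original file):
   "#pragma omp atomic write" over "x = v;" becomes roughly
   __atomic_store_4 (&x, v, MEMMODEL_RELAXED), while a capture form such
   as "{ old = x; x = v; }" needs the loaded value and so becomes
   old = __atomic_exchange_4 (&x, v, MEMMODEL_RELAXED).  */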
7341 static bool
7342 expand_omp_atomic_store (basic_block load_bb, tree addr,
7343 tree loaded_val, tree stored_val, int index)
7345 enum built_in_function tmpbase;
7346 gimple_stmt_iterator gsi;
7347 basic_block store_bb = single_succ (load_bb);
7348 location_t loc;
7349 gimple stmt;
7350 tree decl, call, type, itype;
7351 enum machine_mode imode;
7352 bool exchange;
7354 gsi = gsi_last_bb (load_bb);
7355 stmt = gsi_stmt (gsi);
7356 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7358 /* If the load value is needed, then this isn't a store but an exchange. */
7359 exchange = gimple_omp_atomic_need_value_p (stmt);
7361 gsi = gsi_last_bb (store_bb);
7362 stmt = gsi_stmt (gsi);
7363 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7364 loc = gimple_location (stmt);
7366 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7367 is smaller than word size, then expand_atomic_store assumes that the store
7368 is atomic. We could avoid the builtin entirely in this case. */
7370 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7371 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7372 decl = builtin_decl_explicit (tmpbase);
7373 if (decl == NULL_TREE)
7374 return false;
7376 type = TREE_TYPE (stored_val);
7378 /* Dig out the type of the function's second argument. */
7379 itype = TREE_TYPE (decl);
7380 itype = TYPE_ARG_TYPES (itype);
7381 itype = TREE_CHAIN (itype);
7382 itype = TREE_VALUE (itype);
7383 imode = TYPE_MODE (itype);
7385 if (exchange && !can_atomic_exchange_p (imode, true))
7386 return false;
7388 if (!useless_type_conversion_p (itype, type))
7389 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7390 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7391 build_int_cst (NULL,
7392 gimple_omp_atomic_seq_cst_p (stmt)
7393 ? MEMMODEL_SEQ_CST
7394 : MEMMODEL_RELAXED));
7395 if (exchange)
7397 if (!useless_type_conversion_p (type, itype))
7398 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7399 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7402 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7403 gsi_remove (&gsi, true);
7405 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7406 gsi = gsi_last_bb (load_bb);
7407 gsi_remove (&gsi, true);
7409 if (gimple_in_ssa_p (cfun))
7410 update_ssa (TODO_update_ssa_no_phi);
7412 return true;
7415 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7416 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7417 size of the data type, and thus usable to find the index of the builtin
7418 decl. Returns false if the expression is not of the proper form. */
7420 static bool
7421 expand_omp_atomic_fetch_op (basic_block load_bb,
7422 tree addr, tree loaded_val,
7423 tree stored_val, int index)
7425 enum built_in_function oldbase, newbase, tmpbase;
7426 tree decl, itype, call;
7427 tree lhs, rhs;
7428 basic_block store_bb = single_succ (load_bb);
7429 gimple_stmt_iterator gsi;
7430 gimple stmt;
7431 location_t loc;
7432 enum tree_code code;
7433 bool need_old, need_new;
7434 enum machine_mode imode;
7435 bool seq_cst;
7437 /* We expect to find the following sequences:
7439 load_bb:
7440 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7442 store_bb:
7443 val = tmp OP something; (or: something OP tmp)
7444 GIMPLE_OMP_ATOMIC_STORE (val)
7446 ???FIXME: Allow a more flexible sequence.
7447 Perhaps use data flow to pick the statements.
7449 */
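/* Editor's illustration (assumed user code, not from the original file):
   for "#pragma omp atomic" over "x = x + 5;" with a 4-byte int x, load_bb
   ends in GIMPLE_OMP_ATOMIC_LOAD (tmp, &x) and store_bb holds
   "val = tmp + 5" followed by the atomic store; the code below collapses
   the pair into roughly __atomic_fetch_add_4 (&x, 5, MEMMODEL_RELAXED).  */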
7451 gsi = gsi_after_labels (store_bb);
7452 stmt = gsi_stmt (gsi);
7453 loc = gimple_location (stmt);
7454 if (!is_gimple_assign (stmt))
7455 return false;
7456 gsi_next (&gsi);
7457 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7458 return false;
7459 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7460 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7461 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7462 gcc_checking_assert (!need_old || !need_new);
7464 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7465 return false;
7467 /* Check for one of the supported fetch-op operations. */
7468 code = gimple_assign_rhs_code (stmt);
7469 switch (code)
7471 case PLUS_EXPR:
7472 case POINTER_PLUS_EXPR:
7473 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7474 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7475 break;
7476 case MINUS_EXPR:
7477 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7478 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7479 break;
7480 case BIT_AND_EXPR:
7481 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7482 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7483 break;
7484 case BIT_IOR_EXPR:
7485 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7486 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7487 break;
7488 case BIT_XOR_EXPR:
7489 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7490 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7491 break;
7492 default:
7493 return false;
7496 /* Make sure the expression is of the proper form. */
7497 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7498 rhs = gimple_assign_rhs2 (stmt);
7499 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7500 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7501 rhs = gimple_assign_rhs1 (stmt);
7502 else
7503 return false;
7505 tmpbase = ((enum built_in_function)
7506 ((need_new ? newbase : oldbase) + index + 1));
7507 decl = builtin_decl_explicit (tmpbase);
7508 if (decl == NULL_TREE)
7509 return false;
7510 itype = TREE_TYPE (TREE_TYPE (decl));
7511 imode = TYPE_MODE (itype);
7513 /* We could test all of the various optabs involved, but the fact of the
7514 matter is that (with the exception of i486 vs i586 and xadd) all targets
7515 that support any atomic operation optab also implement compare-and-swap.
7516 Let optabs.c take care of expanding any compare-and-swap loop. */
7517 if (!can_compare_and_swap_p (imode, true))
7518 return false;
7520 gsi = gsi_last_bb (load_bb);
7521 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7523 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7524 It only requires that the operation happen atomically. Thus we can
7525 use the RELAXED memory model. */
7526 call = build_call_expr_loc (loc, decl, 3, addr,
7527 fold_convert_loc (loc, itype, rhs),
7528 build_int_cst (NULL,
7529 seq_cst ? MEMMODEL_SEQ_CST
7530 : MEMMODEL_RELAXED));
7532 if (need_old || need_new)
7534 lhs = need_old ? loaded_val : stored_val;
7535 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7536 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7538 else
7539 call = fold_convert_loc (loc, void_type_node, call);
7540 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7541 gsi_remove (&gsi, true);
7543 gsi = gsi_last_bb (store_bb);
7544 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7545 gsi_remove (&gsi, true);
7546 gsi = gsi_last_bb (store_bb);
7547 gsi_remove (&gsi, true);
7549 if (gimple_in_ssa_p (cfun))
7550 update_ssa (TODO_update_ssa_no_phi);
7552 return true;
7555 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7557 oldval = *addr;
7558 repeat:
7559 newval = rhs; // with oldval replacing *addr in rhs
7560 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
7561 if (oldval != newval)
7562 goto repeat;
7564 INDEX is log2 of the size of the data type, and thus usable to find the
7565 index of the builtin decl. */
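/* For a floating-point TYPE the loop below runs on a view-converted
   integral image of the value; roughly, for a 4-byte float (a sketch,
   names illustrative only):

     int old = *(int *) addr;
   repeat:
     int newv = VIEW_CONVERT (int, VIEW_CONVERT (float, old) OP ...);
     newv = __sync_val_compare_and_swap_4 ((int *) addr, old, newv);
     if (newv != old)
       { old = newv; goto repeat; }

   In SSA form the "old = newv" copy is carried by a PHI node on the
   loop header instead of an explicit assignment.  */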
7567 static bool
7568 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7569 tree addr, tree loaded_val, tree stored_val,
7570 int index)
7572 tree loadedi, storedi, initial, new_storedi, old_vali;
7573 tree type, itype, cmpxchg, iaddr;
7574 gimple_stmt_iterator si;
7575 basic_block loop_header = single_succ (load_bb);
7576 gimple phi, stmt;
7577 edge e;
7578 enum built_in_function fncode;
7580 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7581 order to use the RELAXED memory model effectively. */
7582 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7583 + index + 1);
7584 cmpxchg = builtin_decl_explicit (fncode);
7585 if (cmpxchg == NULL_TREE)
7586 return false;
7587 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7588 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7590 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7591 return false;
7593 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7594 si = gsi_last_bb (load_bb);
7595 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7597 /* For floating-point values, we'll need to view-convert them to integers
7598 so that we can perform the atomic compare and swap. Simplify the
7599 following code by always setting up the "i"ntegral variables. */
7600 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7602 tree iaddr_val;
7604 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7605 true), NULL);
7606 iaddr_val
7607 = force_gimple_operand_gsi (&si,
7608 fold_convert (TREE_TYPE (iaddr), addr),
7609 false, NULL_TREE, true, GSI_SAME_STMT);
7610 stmt = gimple_build_assign (iaddr, iaddr_val);
7611 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7612 loadedi = create_tmp_var (itype, NULL);
7613 if (gimple_in_ssa_p (cfun))
7614 loadedi = make_ssa_name (loadedi, NULL);
7616 else
7618 iaddr = addr;
7619 loadedi = loaded_val;
7622 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7623 tree loaddecl = builtin_decl_explicit (fncode);
7624 if (loaddecl)
7625 initial
7626 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
7627 build_call_expr (loaddecl, 2, iaddr,
7628 build_int_cst (NULL_TREE,
7629 MEMMODEL_RELAXED)));
7630 else
7631 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
7632 build_int_cst (TREE_TYPE (iaddr), 0));
7634 initial
7635 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
7636 GSI_SAME_STMT);
7638 /* Move the value to the LOADEDI temporary. */
7639 if (gimple_in_ssa_p (cfun))
7641 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
7642 phi = create_phi_node (loadedi, loop_header);
7643 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
7644 initial);
7646 else
7647 gsi_insert_before (&si,
7648 gimple_build_assign (loadedi, initial),
7649 GSI_SAME_STMT);
7650 if (loadedi != loaded_val)
7652 gimple_stmt_iterator gsi2;
7653 tree x;
7655 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
7656 gsi2 = gsi_start_bb (loop_header);
7657 if (gimple_in_ssa_p (cfun))
7659 gimple stmt;
7660 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7661 true, GSI_SAME_STMT);
7662 stmt = gimple_build_assign (loaded_val, x);
7663 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
7665 else
7667 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
7668 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7669 true, GSI_SAME_STMT);
7672 gsi_remove (&si, true);
7674 si = gsi_last_bb (store_bb);
7675 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7677 if (iaddr == addr)
7678 storedi = stored_val;
7679 else
7680 storedi =
7681 force_gimple_operand_gsi (&si,
7682 build1 (VIEW_CONVERT_EXPR, itype,
7683 stored_val), true, NULL_TREE, true,
7684 GSI_SAME_STMT);
7686 /* Build the compare&swap statement. */
7687 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
7688 new_storedi = force_gimple_operand_gsi (&si,
7689 fold_convert (TREE_TYPE (loadedi),
7690 new_storedi),
7691 true, NULL_TREE,
7692 true, GSI_SAME_STMT);
7694 if (gimple_in_ssa_p (cfun))
7695 old_vali = loadedi;
7696 else
7698 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
7699 stmt = gimple_build_assign (old_vali, loadedi);
7700 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7702 stmt = gimple_build_assign (loadedi, new_storedi);
7703 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7706 /* Note that we always perform the comparison as an integer, even for
7707 floating point. This allows the atomic operation to properly
7708 succeed even with NaNs and -0.0. */
7709 stmt = gimple_build_cond_empty
7710 (build2 (NE_EXPR, boolean_type_node,
7711 new_storedi, old_vali));
7712 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7714 /* Update cfg. */
7715 e = single_succ_edge (store_bb);
7716 e->flags &= ~EDGE_FALLTHRU;
7717 e->flags |= EDGE_FALSE_VALUE;
7719 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
7721 /* Copy the new value to loadedi (we already did that before the condition
7722 if we are not in SSA). */
7723 if (gimple_in_ssa_p (cfun))
7725 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
7726 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
7729 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
7730 gsi_remove (&si, true);
7732 struct loop *loop = alloc_loop ();
7733 loop->header = loop_header;
7734 loop->latch = store_bb;
7735 add_loop (loop, loop_header->loop_father);
7737 if (gimple_in_ssa_p (cfun))
7738 update_ssa (TODO_update_ssa_no_phi);
7740 return true;
7743 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7745 GOMP_atomic_start ();
7746 *addr = rhs;
7747 GOMP_atomic_end ();
7749 The result is not globally atomic, but works so long as all parallel
7750 references are within #pragma omp atomic directives. According to
7751 responses received from omp@openmp.org, this appears to be within spec,
7752 which makes sense, since that's how several other compilers handle
7753 this situation as well.
7754 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
7755 expanding. STORED_VAL is the operand of the matching
7756 GIMPLE_OMP_ATOMIC_STORE.
7758 We replace
7759 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
7760 loaded_val = *addr;
7762 and replace
7763 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
7764 *addr = stored_val;  */
7767 static bool
7768 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
7769 tree addr, tree loaded_val, tree stored_val)
7771 gimple_stmt_iterator si;
7772 gimple stmt;
7773 tree t;
7775 si = gsi_last_bb (load_bb);
7776 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7778 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
7779 t = build_call_expr (t, 0);
7780 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7782 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
7783 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7784 gsi_remove (&si, true);
7786 si = gsi_last_bb (store_bb);
7787 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7789 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
7790 stored_val);
7791 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7793 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
7794 t = build_call_expr (t, 0);
7795 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7796 gsi_remove (&si, true);
7798 if (gimple_in_ssa_p (cfun))
7799 update_ssa (TODO_update_ssa_no_phi);
7800 return true;
7803 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
7804 using expand_omp_atomic_fetch_op. If that fails, we try
7805 expand_omp_atomic_pipeline, and if that fails too, the
7806 ultimate fallback is wrapping the operation in a mutex
7807 (expand_omp_atomic_mutex). REGION is the atomic region built
7808 by build_omp_regions_1 (). */
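/* For instance, a naturally aligned 4-byte int yields
   index = exact_log2 (4) = 2 and passes the alignment check below
   (exact_log2 (align) = 2 >= index); a type whose size is not a
   power of two, or larger than 16 bytes (index > 4), goes straight
   to the mutex fallback.  */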
7810 static void
7811 expand_omp_atomic (struct omp_region *region)
7813 basic_block load_bb = region->entry, store_bb = region->exit;
7814 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
7815 tree loaded_val = gimple_omp_atomic_load_lhs (load);
7816 tree addr = gimple_omp_atomic_load_rhs (load);
7817 tree stored_val = gimple_omp_atomic_store_val (store);
7818 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7819 HOST_WIDE_INT index;
7821 /* Make sure the type is one of the supported sizes. */
7822 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
7823 index = exact_log2 (index);
7824 if (index >= 0 && index <= 4)
7826 unsigned int align = TYPE_ALIGN_UNIT (type);
7828 /* __sync builtins require strict data alignment. */
7829 if (exact_log2 (align) >= index)
7831 /* Atomic load. */
7832 if (loaded_val == stored_val
7833 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7834 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7835 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7836 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
7837 return;
7839 /* Atomic store. */
7840 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7841 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7842 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7843 && store_bb == single_succ (load_bb)
7844 && first_stmt (store_bb) == store
7845 && expand_omp_atomic_store (load_bb, addr, loaded_val,
7846 stored_val, index))
7847 return;
7849 /* When possible, use specialized atomic update functions. */
7850 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
7851 && store_bb == single_succ (load_bb)
7852 && expand_omp_atomic_fetch_op (load_bb, addr,
7853 loaded_val, stored_val, index))
7854 return;
7856 /* If we don't have specialized __sync builtins, try to implement
7857 the operation as a compare-and-swap loop. */
7858 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
7859 loaded_val, stored_val, index))
7860 return;
7864 /* The ultimate fallback is wrapping the operation in a mutex. */
7865 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
7869 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
7871 static void
7872 expand_omp_target (struct omp_region *region)
7874 basic_block entry_bb, exit_bb, new_bb;
7875 struct function *child_cfun = NULL;
7876 tree child_fn = NULL_TREE, block, t;
7877 gimple_stmt_iterator gsi;
7878 gimple entry_stmt, stmt;
7879 edge e;
7881 entry_stmt = last_stmt (region->entry);
7882 new_bb = region->entry;
7883 int kind = gimple_omp_target_kind (entry_stmt);
7884 if (kind == GF_OMP_TARGET_KIND_REGION)
7886 child_fn = gimple_omp_target_child_fn (entry_stmt);
7887 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7890 entry_bb = region->entry;
7891 exit_bb = region->exit;
7893 if (kind == GF_OMP_TARGET_KIND_REGION)
7895 unsigned srcidx, dstidx, num;
7897 /* If the target region needs data sent from the parent
7898 function, then the very first statement (except possible
7899 tree profile counter updates) of the target body
7900 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
7901 &.OMP_DATA_O is passed as an argument to the child function,
7902 we need to replace it with the argument as seen by the child
7903 function.
7905 In most cases, this will end up being the identity assignment
7906 .OMP_DATA_I = .OMP_DATA_I. However, if the target body had
7907 a function call that has been inlined, the original PARM_DECL
7908 .OMP_DATA_I may have been converted into a different local
7909 variable, in which case we need to keep the assignment. */
7910 if (gimple_omp_target_data_arg (entry_stmt))
7912 basic_block entry_succ_bb = single_succ (entry_bb);
7913 gimple_stmt_iterator gsi;
7914 tree arg;
7915 gimple tgtcopy_stmt = NULL;
7916 tree sender
7917 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
7919 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
7921 gcc_assert (!gsi_end_p (gsi));
7922 stmt = gsi_stmt (gsi);
7923 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7924 continue;
7926 if (gimple_num_ops (stmt) == 2)
7928 tree arg = gimple_assign_rhs1 (stmt);
7930 /* We're ignoring the subcode because we're
7931 effectively doing a STRIP_NOPS. */
7933 if (TREE_CODE (arg) == ADDR_EXPR
7934 && TREE_OPERAND (arg, 0) == sender)
7936 tgtcopy_stmt = stmt;
7937 break;
7942 gcc_assert (tgtcopy_stmt != NULL);
7943 arg = DECL_ARGUMENTS (child_fn);
7945 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
7946 gsi_remove (&gsi, true);
7949 /* Declare local variables needed in CHILD_CFUN. */
7950 block = DECL_INITIAL (child_fn);
7951 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
7952 /* The gimplifier could record temporaries in target block
7953 rather than in containing function's local_decls chain,
7954 which would mean cgraph missed finalizing them. Do it now. */
7955 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
7956 if (TREE_CODE (t) == VAR_DECL
7957 && TREE_STATIC (t)
7958 && !DECL_EXTERNAL (t))
7959 varpool_node::finalize_decl (t);
7960 DECL_SAVED_TREE (child_fn) = NULL;
7961 /* We'll create a CFG for child_fn, so no gimple body is needed. */
7962 gimple_set_body (child_fn, NULL);
7963 TREE_USED (block) = 1;
7965 /* Reset DECL_CONTEXT on function arguments. */
7966 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7967 DECL_CONTEXT (t) = child_fn;
7969 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
7970 so that it can be moved to the child function. */
7971 gsi = gsi_last_bb (entry_bb);
7972 stmt = gsi_stmt (gsi);
7973 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
7974 && gimple_omp_target_kind (stmt)
7975 == GF_OMP_TARGET_KIND_REGION);
7976 gsi_remove (&gsi, true);
7977 e = split_block (entry_bb, stmt);
7978 entry_bb = e->dest;
7979 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7981 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
7982 if (exit_bb)
7984 gsi = gsi_last_bb (exit_bb);
7985 gcc_assert (!gsi_end_p (gsi)
7986 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7987 stmt = gimple_build_return (NULL);
7988 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7989 gsi_remove (&gsi, true);
7992 /* Move the target region into CHILD_CFUN. */
7994 block = gimple_block (entry_stmt);
7996 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
7997 if (exit_bb)
7998 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
7999 /* When the OMP expansion process cannot guarantee an up-to-date
8000 loop tree, arrange for the child function to fix up its loops. */
8001 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8002 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8004 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8005 num = vec_safe_length (child_cfun->local_decls);
8006 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8008 t = (*child_cfun->local_decls)[srcidx];
8009 if (DECL_CONTEXT (t) == cfun->decl)
8010 continue;
8011 if (srcidx != dstidx)
8012 (*child_cfun->local_decls)[dstidx] = t;
8013 dstidx++;
8015 if (dstidx != num)
8016 vec_safe_truncate (child_cfun->local_decls, dstidx);
8018 /* Inform the callgraph about the new function. */
8019 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8020 cgraph_node::add_new_function (child_fn, true);
8022 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8023 fixed in a following pass. */
8024 push_cfun (child_cfun);
8025 cgraph_edge::rebuild_edges ();
8027 /* Some EH regions might become dead, see PR34608. If
8028 pass_cleanup_cfg isn't the first pass to happen with the
8029 new child, these dead EH edges might cause problems.
8030 Clean them up now. */
8031 if (flag_exceptions)
8033 basic_block bb;
8034 bool changed = false;
8036 FOR_EACH_BB_FN (bb, cfun)
8037 changed |= gimple_purge_dead_eh_edges (bb);
8038 if (changed)
8039 cleanup_tree_cfg ();
8041 pop_cfun ();
8044 /* Emit a library call to launch the target region, or do data
8045 transfers. */
8046 tree t1, t2, t3, t4, device, cond, c, clauses;
8047 enum built_in_function start_ix;
8048 location_t clause_loc;
8050 clauses = gimple_omp_target_clauses (entry_stmt);
8052 if (kind == GF_OMP_TARGET_KIND_REGION)
8053 start_ix = BUILT_IN_GOMP_TARGET;
8054 else if (kind == GF_OMP_TARGET_KIND_DATA)
8055 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8056 else
8057 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8059 /* By default, the value of DEVICE is -1 (let the runtime library choose)
8060 and there is no conditional. */
8061 cond = NULL_TREE;
8062 device = build_int_cst (integer_type_node, -1);
8064 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8065 if (c)
8066 cond = OMP_CLAUSE_IF_EXPR (c);
8068 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8069 if (c)
8071 device = OMP_CLAUSE_DEVICE_ID (c);
8072 clause_loc = OMP_CLAUSE_LOCATION (c);
8074 else
8075 clause_loc = gimple_location (entry_stmt);
8077 /* Ensure 'device' is of the correct type. */
8078 device = fold_convert_loc (clause_loc, integer_type_node, device);
8080 /* If we found the clause 'if (cond)', build
8081 (cond ? device : -2). */
8082 if (cond)
8084 cond = gimple_boolify (cond);
8086 basic_block cond_bb, then_bb, else_bb;
8087 edge e;
8088 tree tmp_var;
8090 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
8091 if (kind != GF_OMP_TARGET_KIND_REGION)
8093 gsi = gsi_last_bb (new_bb);
8094 gsi_prev (&gsi);
8095 e = split_block (new_bb, gsi_stmt (gsi));
8097 else
8098 e = split_block (new_bb, NULL);
8099 cond_bb = e->src;
8100 new_bb = e->dest;
8101 remove_edge (e);
8103 then_bb = create_empty_bb (cond_bb);
8104 else_bb = create_empty_bb (then_bb);
8105 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8106 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8108 stmt = gimple_build_cond_empty (cond);
8109 gsi = gsi_last_bb (cond_bb);
8110 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8112 gsi = gsi_start_bb (then_bb);
8113 stmt = gimple_build_assign (tmp_var, device);
8114 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8116 gsi = gsi_start_bb (else_bb);
8117 stmt = gimple_build_assign (tmp_var,
8118 build_int_cst (integer_type_node, -2));
8119 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8121 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8122 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8123 add_bb_to_loop (then_bb, cond_bb->loop_father);
8124 add_bb_to_loop (else_bb, cond_bb->loop_father);
8125 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8126 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8128 device = tmp_var;
8131 gsi = gsi_last_bb (new_bb);
8132 t = gimple_omp_target_data_arg (entry_stmt);
8133 if (t == NULL)
8135 t1 = size_zero_node;
8136 t2 = build_zero_cst (ptr_type_node);
8137 t3 = t2;
8138 t4 = t2;
8140 else
8142 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8143 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8144 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8145 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8146 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
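/* T1 is the number of mapped objects (one past the upper bound of the
   map vectors' array domain), while T2/T3/T4 are the addresses of the
   host-address, size and map-kind vectors, matching the mapnum,
   hostaddrs, sizes and kinds parameters of the GOMP_target* entry
   points.  */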
8149 gimple g;
8150 /* FIXME: This will be the address of the
8151 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8152 symbol, as soon as the linker plugin is able to create it for us. */
8153 tree openmp_target = build_zero_cst (ptr_type_node);
8154 if (kind == GF_OMP_TARGET_KIND_REGION)
8156 tree fnaddr = build_fold_addr_expr (child_fn);
8157 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8158 device, fnaddr, openmp_target, t1, t2, t3, t4);
8160 else
8161 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8162 device, openmp_target, t1, t2, t3, t4);
8163 gimple_set_location (g, gimple_location (entry_stmt));
8164 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8165 if (kind != GF_OMP_TARGET_KIND_REGION)
8167 g = gsi_stmt (gsi);
8168 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8169 gsi_remove (&gsi, true);
8171 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8173 gsi = gsi_last_bb (region->exit);
8174 g = gsi_stmt (gsi);
8175 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8176 gsi_remove (&gsi, true);
8181 /* Expand the parallel region tree rooted at REGION. Expansion
8182 proceeds in depth-first order. Innermost regions are expanded
8183 first. This way, parallel regions that require a new function to
8184 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8185 internal dependencies in their body. */
8187 static void
8188 expand_omp (struct omp_region *region)
8190 while (region)
8192 location_t saved_location;
8193 gimple inner_stmt = NULL;
8195 /* First, determine whether this is a combined parallel+workshare
8196 region. */
8197 if (region->type == GIMPLE_OMP_PARALLEL)
8198 determine_parallel_type (region);
8200 if (region->type == GIMPLE_OMP_FOR
8201 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8202 inner_stmt = last_stmt (region->inner->entry);
8204 if (region->inner)
8205 expand_omp (region->inner);
8207 saved_location = input_location;
8208 if (gimple_has_location (last_stmt (region->entry)))
8209 input_location = gimple_location (last_stmt (region->entry));
8211 switch (region->type)
8213 case GIMPLE_OMP_PARALLEL:
8214 case GIMPLE_OMP_TASK:
8215 expand_omp_taskreg (region);
8216 break;
8218 case GIMPLE_OMP_FOR:
8219 expand_omp_for (region, inner_stmt);
8220 break;
8222 case GIMPLE_OMP_SECTIONS:
8223 expand_omp_sections (region);
8224 break;
8226 case GIMPLE_OMP_SECTION:
8227 /* Individual omp sections are handled together with their
8228 parent GIMPLE_OMP_SECTIONS region. */
8229 break;
8231 case GIMPLE_OMP_SINGLE:
8232 expand_omp_single (region);
8233 break;
8235 case GIMPLE_OMP_MASTER:
8236 case GIMPLE_OMP_TASKGROUP:
8237 case GIMPLE_OMP_ORDERED:
8238 case GIMPLE_OMP_CRITICAL:
8239 case GIMPLE_OMP_TEAMS:
8240 expand_omp_synch (region);
8241 break;
8243 case GIMPLE_OMP_ATOMIC_LOAD:
8244 expand_omp_atomic (region);
8245 break;
8247 case GIMPLE_OMP_TARGET:
8248 expand_omp_target (region);
8249 break;
8251 default:
8252 gcc_unreachable ();
8255 input_location = saved_location;
8256 region = region->next;
8261 /* Helper for build_omp_regions. Scan the dominator tree starting at
8262 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8263 true, the function ends once a single tree is built (otherwise, the
8264 whole forest of OMP constructs may be built). */
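/* For example,

     #pragma omp parallel
     ...
       #pragma omp for
       ...

   produces a GIMPLE_OMP_PARALLEL region whose inner child is the
   GIMPLE_OMP_FOR region; each region's exit is the block containing
   the matching GIMPLE_OMP_RETURN.  */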
8266 static void
8267 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8268 bool single_tree)
8270 gimple_stmt_iterator gsi;
8271 gimple stmt;
8272 basic_block son;
8274 gsi = gsi_last_bb (bb);
8275 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8277 struct omp_region *region;
8278 enum gimple_code code;
8280 stmt = gsi_stmt (gsi);
8281 code = gimple_code (stmt);
8282 if (code == GIMPLE_OMP_RETURN)
8284 /* STMT is the return point out of region PARENT. Mark it
8285 as the exit point and make PARENT the immediately
8286 enclosing region. */
8287 gcc_assert (parent);
8288 region = parent;
8289 region->exit = bb;
8290 parent = parent->outer;
8292 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8294 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8295 GIMPLE_OMP_RETURN, but matches with
8296 GIMPLE_OMP_ATOMIC_LOAD. */
8297 gcc_assert (parent);
8298 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8299 region = parent;
8300 region->exit = bb;
8301 parent = parent->outer;
8304 else if (code == GIMPLE_OMP_CONTINUE)
8306 gcc_assert (parent);
8307 parent->cont = bb;
8309 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8311 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8312 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8315 else if (code == GIMPLE_OMP_TARGET
8316 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8317 new_omp_region (bb, code, parent);
8318 else
8320 /* Otherwise, this directive becomes the parent for a new
8321 region. */
8322 region = new_omp_region (bb, code, parent);
8323 parent = region;
8327 if (single_tree && !parent)
8328 return;
8330 for (son = first_dom_son (CDI_DOMINATORS, bb);
8331 son;
8332 son = next_dom_son (CDI_DOMINATORS, son))
8333 build_omp_regions_1 (son, parent, single_tree);
8336 /* Build the tree of OMP regions rooted at ROOT, storing it in
8337 root_omp_region. */
8339 static void
8340 build_omp_regions_root (basic_block root)
8342 gcc_assert (root_omp_region == NULL);
8343 build_omp_regions_1 (root, NULL, true);
8344 gcc_assert (root_omp_region != NULL);
8347 /* Expands omp construct (and its subconstructs) starting in HEAD. */
8349 void
8350 omp_expand_local (basic_block head)
8352 build_omp_regions_root (head);
8353 if (dump_file && (dump_flags & TDF_DETAILS))
8355 fprintf (dump_file, "\nOMP region tree\n\n");
8356 dump_omp_region (dump_file, root_omp_region, 0);
8357 fprintf (dump_file, "\n");
8360 remove_exit_barriers (root_omp_region);
8361 expand_omp (root_omp_region);
8363 free_omp_regions ();
8366 /* Scan the CFG and build a tree of OMP regions, storing it in
8367 root_omp_region. */
8369 static void
8370 build_omp_regions (void)
8372 gcc_assert (root_omp_region == NULL);
8373 calculate_dominance_info (CDI_DOMINATORS);
8374 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8377 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8379 static unsigned int
8380 execute_expand_omp (void)
8382 build_omp_regions ();
8384 if (!root_omp_region)
8385 return 0;
8387 if (dump_file)
8389 fprintf (dump_file, "\nOMP region tree\n\n");
8390 dump_omp_region (dump_file, root_omp_region, 0);
8391 fprintf (dump_file, "\n");
8394 remove_exit_barriers (root_omp_region);
8396 expand_omp (root_omp_region);
8398 cleanup_tree_cfg ();
8400 free_omp_regions ();
8402 return 0;
8405 /* OMP expansion -- the default pass, run before creation of SSA form. */
8407 namespace {
8409 const pass_data pass_data_expand_omp =
8411 GIMPLE_PASS, /* type */
8412 "ompexp", /* name */
8413 OPTGROUP_NONE, /* optinfo_flags */
8414 TV_NONE, /* tv_id */
8415 PROP_gimple_any, /* properties_required */
8416 0, /* properties_provided */
8417 0, /* properties_destroyed */
8418 0, /* todo_flags_start */
8419 0, /* todo_flags_finish */
8422 class pass_expand_omp : public gimple_opt_pass
8424 public:
8425 pass_expand_omp (gcc::context *ctxt)
8426 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8429 /* opt_pass methods: */
8430 virtual bool gate (function *)
8432 return ((flag_openmp != 0 || flag_openmp_simd != 0
8433 || flag_cilkplus != 0) && !seen_error ());
8436 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8438 }; // class pass_expand_omp
8440 } // anon namespace
8442 gimple_opt_pass *
8443 make_pass_expand_omp (gcc::context *ctxt)
8445 return new pass_expand_omp (ctxt);
8448 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8450 /* If CTX is a worksharing context inside of a cancellable parallel
8451 region and it isn't nowait, add an lhs to its GIMPLE_OMP_RETURN
8452 and a conditional branch to the parallel's cancel_label to handle
8453 cancellation in the implicit barrier. */
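/* Schematically, the code appended below is

     <lhs> = GIMPLE_OMP_RETURN;     (the barrier's "was cancelled" result)
     if (<lhs> != false) goto <parallel's cancel_label>;
     <fallthru_label>:

   a sketch only; the actual barrier call is materialized later, at
   expansion time.  */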
8455 static void
8456 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8458 gimple omp_return = gimple_seq_last_stmt (*body);
8459 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8460 if (gimple_omp_return_nowait_p (omp_return))
8461 return;
8462 if (ctx->outer
8463 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8464 && ctx->outer->cancellable)
8466 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8467 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8468 tree lhs = create_tmp_var (c_bool_type, NULL);
8469 gimple_omp_return_set_lhs (omp_return, lhs);
8470 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8471 gimple g = gimple_build_cond (NE_EXPR, lhs,
8472 fold_convert (c_bool_type,
8473 boolean_false_node),
8474 ctx->outer->cancel_label, fallthru_label);
8475 gimple_seq_add_stmt (body, g);
8476 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8480 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8481 CTX is the enclosing OMP context for the current statement. */
8483 static void
8484 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8486 tree block, control;
8487 gimple_stmt_iterator tgsi;
8488 gimple stmt, new_stmt, bind, t;
8489 gimple_seq ilist, dlist, olist, new_body;
8491 stmt = gsi_stmt (*gsi_p);
8493 push_gimplify_context ();
8495 dlist = NULL;
8496 ilist = NULL;
8497 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8498 &ilist, &dlist, ctx, NULL);
8500 new_body = gimple_omp_body (stmt);
8501 gimple_omp_set_body (stmt, NULL);
8502 tgsi = gsi_start (new_body);
8503 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8505 omp_context *sctx;
8506 gimple sec_start;
8508 sec_start = gsi_stmt (tgsi);
8509 sctx = maybe_lookup_ctx (sec_start);
8510 gcc_assert (sctx);
8512 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8513 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8514 GSI_CONTINUE_LINKING);
8515 gimple_omp_set_body (sec_start, NULL);
8517 if (gsi_one_before_end_p (tgsi))
8519 gimple_seq l = NULL;
8520 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8521 &l, ctx);
8522 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8523 gimple_omp_section_set_last (sec_start);
8526 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8527 GSI_CONTINUE_LINKING);
8530 block = make_node (BLOCK);
8531 bind = gimple_build_bind (NULL, new_body, block);
8533 olist = NULL;
8534 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8536 block = make_node (BLOCK);
8537 new_stmt = gimple_build_bind (NULL, NULL, block);
8538 gsi_replace (gsi_p, new_stmt, true);
8540 pop_gimplify_context (new_stmt);
8541 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8542 BLOCK_VARS (block) = gimple_bind_vars (bind);
8543 if (BLOCK_VARS (block))
8544 TREE_USED (block) = 1;
8546 new_body = NULL;
8547 gimple_seq_add_seq (&new_body, ilist);
8548 gimple_seq_add_stmt (&new_body, stmt);
8549 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8550 gimple_seq_add_stmt (&new_body, bind);
8552 control = create_tmp_var (unsigned_type_node, ".section");
8553 t = gimple_build_omp_continue (control, control);
8554 gimple_omp_sections_set_control (stmt, control);
8555 gimple_seq_add_stmt (&new_body, t);
8557 gimple_seq_add_seq (&new_body, olist);
8558 if (ctx->cancellable)
8559 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8560 gimple_seq_add_seq (&new_body, dlist);
8562 new_body = maybe_catch_exception (new_body);
8564 t = gimple_build_omp_return
8565 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8566 OMP_CLAUSE_NOWAIT));
8567 gimple_seq_add_stmt (&new_body, t);
8568 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8570 gimple_bind_set_body (new_stmt, new_body);
8574 /* A subroutine of lower_omp_single. Expand the simple form of
8575 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8577 if (GOMP_single_start ())
8578 BODY;
8579 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8581 FIXME. It may be better to delay expanding the logic of this until
8582 pass_expand_omp. The expanded logic may make the job more difficult
8583 for a synchronization analysis pass. */
8585 static void
8586 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
8588 location_t loc = gimple_location (single_stmt);
8589 tree tlabel = create_artificial_label (loc);
8590 tree flabel = create_artificial_label (loc);
8591 gimple call, cond;
8592 tree lhs, decl;
8594 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8595 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8596 call = gimple_build_call (decl, 0);
8597 gimple_call_set_lhs (call, lhs);
8598 gimple_seq_add_stmt (pre_p, call);
8600 cond = gimple_build_cond (EQ_EXPR, lhs,
8601 fold_convert_loc (loc, TREE_TYPE (lhs),
8602 boolean_true_node),
8603 tlabel, flabel);
8604 gimple_seq_add_stmt (pre_p, cond);
8605 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
8606 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8607 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
8611 /* A subroutine of lower_omp_single. Expand the form of
8612 a GIMPLE_OMP_SINGLE that has a copyprivate clause:
8614 #pragma omp single copyprivate (a, b, c)
8616 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
8619 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
8621 BODY;
8622 copyout.a = a;
8623 copyout.b = b;
8624 copyout.c = c;
8625 GOMP_single_copy_end (&copyout);
8627 else
8629 a = copyout_p->a;
8630 b = copyout_p->b;
8631 c = copyout_p->c;
8633 GOMP_barrier ();
8636 FIXME. It may be better to delay expanding the logic of this until
8637 pass_expand_omp. The expanded logic may make the job more difficult
8638 for a synchronization analysis pass. */
8640 static void
8641 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
8643 tree ptr_type, t, l0, l1, l2, bfn_decl;
8644 gimple_seq copyin_seq;
8645 location_t loc = gimple_location (single_stmt);
8647 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
8649 ptr_type = build_pointer_type (ctx->record_type);
8650 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
8652 l0 = create_artificial_label (loc);
8653 l1 = create_artificial_label (loc);
8654 l2 = create_artificial_label (loc);
8656 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
8657 t = build_call_expr_loc (loc, bfn_decl, 0);
8658 t = fold_convert_loc (loc, ptr_type, t);
8659 gimplify_assign (ctx->receiver_decl, t, pre_p);
8661 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
8662 build_int_cst (ptr_type, 0));
8663 t = build3 (COND_EXPR, void_type_node, t,
8664 build_and_jump (&l0), build_and_jump (&l1));
8665 gimplify_and_add (t, pre_p);
8667 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
8669 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8671 copyin_seq = NULL;
8672 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
8673 &copyin_seq, ctx);
8675 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
8676 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
8677 t = build_call_expr_loc (loc, bfn_decl, 1, t);
8678 gimplify_and_add (t, pre_p);
8680 t = build_and_jump (&l2);
8681 gimplify_and_add (t, pre_p);
8683 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
8685 gimple_seq_add_seq (pre_p, copyin_seq);
8687 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
8691 /* Expand code for an OpenMP single directive. */
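/* A copyprivate clause causes ctx->record_type to be allocated for
   the copied values, so a non-NULL record type selects the
   copyprivate form below; otherwise the simple form is used.  */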
8693 static void
8694 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8696 tree block;
8697 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
8698 gimple_seq bind_body, bind_body_tail = NULL, dlist;
8700 push_gimplify_context ();
8702 block = make_node (BLOCK);
8703 bind = gimple_build_bind (NULL, NULL, block);
8704 gsi_replace (gsi_p, bind, true);
8705 bind_body = NULL;
8706 dlist = NULL;
8707 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
8708 &bind_body, &dlist, ctx, NULL);
8709 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
8711 gimple_seq_add_stmt (&bind_body, single_stmt);
8713 if (ctx->record_type)
8714 lower_omp_single_copy (single_stmt, &bind_body, ctx);
8715 else
8716 lower_omp_single_simple (single_stmt, &bind_body);
8718 gimple_omp_set_body (single_stmt, NULL);
8720 gimple_seq_add_seq (&bind_body, dlist);
8722 bind_body = maybe_catch_exception (bind_body);
8724 t = gimple_build_omp_return
8725 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
8726 OMP_CLAUSE_NOWAIT));
8727 gimple_seq_add_stmt (&bind_body_tail, t);
8728 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
8729 if (ctx->record_type)
8731 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
8732 tree clobber = build_constructor (ctx->record_type, NULL);
8733 TREE_THIS_VOLATILE (clobber) = 1;
8734 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
8735 clobber), GSI_SAME_STMT);
8737 gimple_seq_add_seq (&bind_body, bind_body_tail);
8738 gimple_bind_set_body (bind, bind_body);
8740 pop_gimplify_context (bind);
8742 gimple_bind_append_vars (bind, ctx->block_vars);
8743 BLOCK_VARS (block) = ctx->block_vars;
8744 if (BLOCK_VARS (block))
8745 TREE_USED (block) = 1;
8749 /* Expand code for an OpenMP master directive. */
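/* The lowering wraps the body in a thread-number guard (a sketch):

     if (omp_get_thread_num () != 0) goto lab;
     <body>
   lab:
     GIMPLE_OMP_RETURN
 */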
8751 static void
8752 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8754 tree block, lab = NULL, x, bfn_decl;
8755 gimple stmt = gsi_stmt (*gsi_p), bind;
8756 location_t loc = gimple_location (stmt);
8757 gimple_seq tseq;
8759 push_gimplify_context ();
8761 block = make_node (BLOCK);
8762 bind = gimple_build_bind (NULL, NULL, block);
8763 gsi_replace (gsi_p, bind, true);
8764 gimple_bind_add_stmt (bind, stmt);
8766 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
8767 x = build_call_expr_loc (loc, bfn_decl, 0);
8768 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
8769 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
8770 tseq = NULL;
8771 gimplify_and_add (x, &tseq);
8772 gimple_bind_add_seq (bind, tseq);
8774 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8775 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8776 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8777 gimple_omp_set_body (stmt, NULL);
8779 gimple_bind_add_stmt (bind, gimple_build_label (lab));
8781 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8783 pop_gimplify_context (bind);
8785 gimple_bind_append_vars (bind, ctx->block_vars);
8786 BLOCK_VARS (block) = ctx->block_vars;
8790 /* Expand code for an OpenMP taskgroup directive. */
8792 static void
8793 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8795 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8796 tree block = make_node (BLOCK);
8798 bind = gimple_build_bind (NULL, NULL, block);
8799 gsi_replace (gsi_p, bind, true);
8800 gimple_bind_add_stmt (bind, stmt);
8802 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
8803 0);
8804 gimple_bind_add_stmt (bind, x);
8806 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8807 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8808 gimple_omp_set_body (stmt, NULL);
8810 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8812 gimple_bind_append_vars (bind, ctx->block_vars);
8813 BLOCK_VARS (block) = ctx->block_vars;
8817 /* Expand code for an OpenMP ordered directive. */
8819 static void
8820 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8822 tree block;
8823 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8825 push_gimplify_context ();
8827 block = make_node (BLOCK);
8828 bind = gimple_build_bind (NULL, NULL, block);
8829 gsi_replace (gsi_p, bind, true);
8830 gimple_bind_add_stmt (bind, stmt);
8832 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
8833 0);
8834 gimple_bind_add_stmt (bind, x);
8836 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8837 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8838 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8839 gimple_omp_set_body (stmt, NULL);
8841 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
8842 gimple_bind_add_stmt (bind, x);
8844 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8846 pop_gimplify_context (bind);
8848 gimple_bind_append_vars (bind, ctx->block_vars);
8849 BLOCK_VARS (block) = gimple_bind_vars (bind);
8853 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
8854 substitution of a couple of function calls. But the NAMED case
8855 requires that languages coordinate a symbol name, so it is
8856 best put here in common code. */
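/* E.g. '#pragma omp critical (foo)' lowers, roughly, to

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     <body>
     GOMP_critical_name_end (&.gomp_critical_user_foo);

   where the pointer variable is emitted as a common symbol so that
   every translation unit naming the same critical shares one lock.  */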
8858 static GTY((param1_is (tree), param2_is (tree)))
8859 splay_tree critical_name_mutexes;
8861 static void
8862 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8864 tree block;
8865 tree name, lock, unlock;
8866 gimple stmt = gsi_stmt (*gsi_p), bind;
8867 location_t loc = gimple_location (stmt);
8868 gimple_seq tbody;
8870 name = gimple_omp_critical_name (stmt);
8871 if (name)
8873 tree decl;
8874 splay_tree_node n;
8876 if (!critical_name_mutexes)
8877 critical_name_mutexes
8878 = splay_tree_new_ggc (splay_tree_compare_pointers,
8879 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
8880 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
8882 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
8883 if (n == NULL)
8885 char *new_str;
8887 decl = create_tmp_var_raw (ptr_type_node, NULL);
8889 new_str = ACONCAT ((".gomp_critical_user_",
8890 IDENTIFIER_POINTER (name), NULL));
8891 DECL_NAME (decl) = get_identifier (new_str);
8892 TREE_PUBLIC (decl) = 1;
8893 TREE_STATIC (decl) = 1;
8894 DECL_COMMON (decl) = 1;
8895 DECL_ARTIFICIAL (decl) = 1;
8896 DECL_IGNORED_P (decl) = 1;
8897 varpool_node::finalize_decl (decl);
8899 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
8900 (splay_tree_value) decl);
8902 else
8903 decl = (tree) n->value;
8905 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
8906 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
8908 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
8909 unlock = build_call_expr_loc (loc, unlock, 1,
8910 build_fold_addr_expr_loc (loc, decl));
8912 else
8914 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
8915 lock = build_call_expr_loc (loc, lock, 0);
8917 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
8918 unlock = build_call_expr_loc (loc, unlock, 0);
8921 push_gimplify_context ();
8923 block = make_node (BLOCK);
8924 bind = gimple_build_bind (NULL, NULL, block);
8925 gsi_replace (gsi_p, bind, true);
8926 gimple_bind_add_stmt (bind, stmt);
8928 tbody = gimple_bind_body (bind);
8929 gimplify_and_add (lock, &tbody);
8930 gimple_bind_set_body (bind, tbody);
8932 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8933 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8934 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8935 gimple_omp_set_body (stmt, NULL);
8937 tbody = gimple_bind_body (bind);
8938 gimplify_and_add (unlock, &tbody);
8939 gimple_bind_set_body (bind, tbody);
8941 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8943 pop_gimplify_context (bind);
8944 gimple_bind_append_vars (bind, ctx->block_vars);
8945 BLOCK_VARS (block) = gimple_bind_vars (bind);
8949 /* A subroutine of lower_omp_for. Generate code to emit the predicate
8950 for a lastprivate clause. Given a loop control predicate of (V
8951 cond N2), we gate the clause on (!(V cond N2)). The lowered form
8952 is appended to *DLIST, and iterator initialization is appended to
8953 *BODY_P. */
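/* E.g. for 'for (V = N1; V < N2; V++)' the gate is !(V < N2), i.e.
   V >= N2; when the step is +1 or -1 this is narrowed below to the
   strict test V == N2, which VRP-style optimizations can exploit.  */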
8955 static void
8956 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
8957 gimple_seq *dlist, struct omp_context *ctx)
8959 tree clauses, cond, vinit;
8960 enum tree_code cond_code;
8961 gimple_seq stmts;
8963 cond_code = fd->loop.cond_code;
8964 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
8966 /* When possible, use a strict equality expression. This can let VRP
8967 type optimizations deduce the value and remove a copy. */
8968 if (tree_fits_shwi_p (fd->loop.step))
8970 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
8971 if (step == 1 || step == -1)
8972 cond_code = EQ_EXPR;
8975 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
8977 clauses = gimple_omp_for_clauses (fd->for_stmt);
8978 stmts = NULL;
8979 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
8980 if (!gimple_seq_empty_p (stmts))
8982 gimple_seq_add_seq (&stmts, *dlist);
8983 *dlist = stmts;
8985 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
8986 vinit = fd->loop.n1;
8987 if (cond_code == EQ_EXPR
8988 && tree_fits_shwi_p (fd->loop.n2)
8989 && ! integer_zerop (fd->loop.n2))
8990 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
8991 else
8992 vinit = unshare_expr (vinit);
8994 /* Initialize the iterator variable, so that threads that don't execute
8995 any iterations don't execute the lastprivate clauses by accident. */
8996 gimplify_assign (fd->loop.v, vinit, body_p);
9001 /* Lower code for an OpenMP loop directive. */
9003 static void
9004 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9006 tree *rhs_p, block;
9007 struct omp_for_data fd, *fdp = NULL;
9008 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
9009 gimple_seq omp_for_body, body, dlist;
9010 size_t i;
9012 push_gimplify_context ();
9014 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
9016 block = make_node (BLOCK);
9017 new_stmt = gimple_build_bind (NULL, NULL, block);
9018 /* Replace at gsi right away, so that 'stmt' is no longer a member
9019 of a sequence, as we're going to add it to a different
9020 one below. */
9021 gsi_replace (gsi_p, new_stmt, true);
9023 /* Move declaration of temporaries in the loop body before we make
9024 it go away. */
9025 omp_for_body = gimple_omp_body (stmt);
9026 if (!gimple_seq_empty_p (omp_for_body)
9027 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
9029 gimple inner_bind = gimple_seq_first_stmt (omp_for_body);
9030 tree vars = gimple_bind_vars (inner_bind);
9031 gimple_bind_append_vars (new_stmt, vars);
9032 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block; don't
9033 keep them on the inner_bind and its block. */
9034 gimple_bind_set_vars (inner_bind, NULL_TREE);
9035 if (gimple_bind_block (inner_bind))
9036 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
9039 if (gimple_omp_for_combined_into_p (stmt))
9041 extract_omp_for_data (stmt, &fd, NULL);
9042 fdp = &fd;
9044 /* We need two temporaries with fd.loop.v type (istart/iend)
9045 and then (fd.collapse - 1) temporaries with the same
9046 type for count2 ... countN-1 vars if not constant. */
9047 size_t count = 2;
9048 tree type = fd.iter_type;
9049 if (fd.collapse > 1
9050 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
9051 count += fd.collapse - 1;
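/* For instance, a combined loop with collapse(2) and a non-constant
   bound n2 needs count = 2 + (2 - 1) = 3 _looptemp_ clauses.  */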
9052 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
9053 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
9054 tree clauses = *pc;
9055 if (parallel_for)
9056 outerc
9057 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
9058 OMP_CLAUSE__LOOPTEMP_);
9059 for (i = 0; i < count; i++)
9061 tree temp;
9062 if (parallel_for)
9064 gcc_assert (outerc);
9065 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
9066 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
9067 OMP_CLAUSE__LOOPTEMP_);
9069 else
9071 temp = create_tmp_var (type, NULL);
9072 insert_decl_map (&ctx->outer->cb, temp, temp);
9074 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
9075 OMP_CLAUSE_DECL (*pc) = temp;
9076 pc = &OMP_CLAUSE_CHAIN (*pc);
9078 *pc = clauses;
9081 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9082 dlist = NULL;
9083 body = NULL;
9084 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
9085 fdp);
9086 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
9088 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9090 /* Lower the header expressions. At this point, we can assume that
9091 the header is of the form:
9093 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
9095 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
9096 using the .omp_data_s mapping, if needed. */
9097 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
9099 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
9100 if (!is_gimple_min_invariant (*rhs_p))
9101 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9103 rhs_p = gimple_omp_for_final_ptr (stmt, i);
9104 if (!is_gimple_min_invariant (*rhs_p))
9105 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9107 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
9108 if (!is_gimple_min_invariant (*rhs_p))
9109 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9112 /* Once lowered, extract the bounds and clauses. */
9113 extract_omp_for_data (stmt, &fd, NULL);
9115 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
9117 gimple_seq_add_stmt (&body, stmt);
9118 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
9120 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
9121 fd.loop.v));
9123 /* After the loop, add exit clauses. */
9124 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
9126 if (ctx->cancellable)
9127 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
9129 gimple_seq_add_seq (&body, dlist);
9131 body = maybe_catch_exception (body);
9133 /* Region exit marker goes at the end of the loop body. */
9134 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
9135 maybe_add_implicit_barrier_cancel (ctx, &body);
9136 pop_gimplify_context (new_stmt);
9138 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9139 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
9140 if (BLOCK_VARS (block))
9141 TREE_USED (block) = 1;
9143 gimple_bind_set_body (new_stmt, body);
9144 gimple_omp_set_body (stmt, NULL);
9145 gimple_omp_for_set_pre_body (stmt, NULL);
9148 /* Callback for walk_stmts. Check if the current statement only contains
9149 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
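/* *INFO acts as a tri-state: 0 while nothing has been seen, 1 after
   exactly one workshare statement, and -1 once anything else (or a
   second workshare) is found, disqualifying the combined form.  */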
9151 static tree
9152 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9153 bool *handled_ops_p,
9154 struct walk_stmt_info *wi)
9156 int *info = (int *) wi->info;
9157 gimple stmt = gsi_stmt (*gsi_p);
9159 *handled_ops_p = true;
9160 switch (gimple_code (stmt))
9162 WALK_SUBSTMTS;
9164 case GIMPLE_OMP_FOR:
9165 case GIMPLE_OMP_SECTIONS:
9166 *info = *info == 0 ? 1 : -1;
9167 break;
9168 default:
9169 *info = -1;
9170 break;
9172 return NULL;
9175 struct omp_taskcopy_context
9177 /* This field must be at the beginning, as we do "inheritance": Some
9178 callback functions for tree-inline.c (e.g., omp_copy_decl)
9179 receive a copy_body_data pointer that is up-casted to an
9180 omp_context pointer. */
9181 copy_body_data cb;
9182 omp_context *ctx;
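/* copy_body_data callback for remapping decls: variables that live in
   the task's data record (sfield_map) get a fresh temporary in the
   copy function; everything else is used as-is.  */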
9185 static tree
9186 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9188 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9190 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9191 return create_tmp_var (TREE_TYPE (var), NULL);
9193 return var;
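/* Build a copy of record type ORIG_TYPE for use in the task copy
   function, remapping variably modified field types, sizes and
   offsets through TCCTX.  */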
9196 static tree
9197 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9199 tree name, new_fields = NULL, type, f;
9201 type = lang_hooks.types.make_type (RECORD_TYPE);
9202 name = DECL_NAME (TYPE_NAME (orig_type));
9203 name = build_decl (gimple_location (tcctx->ctx->stmt),
9204 TYPE_DECL, name, type);
9205 TYPE_NAME (type) = name;
9207 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9209 tree new_f = copy_node (f);
9210 DECL_CONTEXT (new_f) = type;
9211 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9212 TREE_CHAIN (new_f) = new_fields;
9213 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9214 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9215 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9216 &tcctx->cb, NULL);
9217 new_fields = new_f;
9218 tcctx->cb.decl_map->put (f, new_f);
9220 TYPE_FIELDS (type) = nreverse (new_fields);
9221 layout_type (type);
9222 return type;
9225 /* Create task copyfn. */
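/* The copy function receives pointers to the task's destination and
   source records and copies (or copy-constructs) the shared pointers
   and firstprivate values across, in the three passes below.  */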
9227 static void
9228 create_task_copyfn (gimple task_stmt, omp_context *ctx)
9230 struct function *child_cfun;
9231 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9232 tree record_type, srecord_type, bind, list;
9233 bool record_needs_remap = false, srecord_needs_remap = false;
9234 splay_tree_node n;
9235 struct omp_taskcopy_context tcctx;
9236 location_t loc = gimple_location (task_stmt);
9238 child_fn = gimple_omp_task_copy_fn (task_stmt);
9239 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9240 gcc_assert (child_cfun->cfg == NULL);
9241 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9243 /* Reset DECL_CONTEXT on function arguments. */
9244 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9245 DECL_CONTEXT (t) = child_fn;
9247 /* Populate the function. */
9248 push_gimplify_context ();
9249 push_cfun (child_cfun);
9251 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9252 TREE_SIDE_EFFECTS (bind) = 1;
9253 list = NULL;
9254 DECL_SAVED_TREE (child_fn) = bind;
9255 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9257 /* Remap src and dst argument types if needed. */
9258 record_type = ctx->record_type;
9259 srecord_type = ctx->srecord_type;
9260 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9261 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9263 record_needs_remap = true;
9264 break;
9266 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9267 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9269 srecord_needs_remap = true;
9270 break;
9273 if (record_needs_remap || srecord_needs_remap)
9275 memset (&tcctx, '\0', sizeof (tcctx));
9276 tcctx.cb.src_fn = ctx->cb.src_fn;
9277 tcctx.cb.dst_fn = child_fn;
9278 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
9279 gcc_checking_assert (tcctx.cb.src_node);
9280 tcctx.cb.dst_node = tcctx.cb.src_node;
9281 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9282 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9283 tcctx.cb.eh_lp_nr = 0;
9284 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9285 tcctx.cb.decl_map = new hash_map<tree, tree>;
9286 tcctx.ctx = ctx;
9288 if (record_needs_remap)
9289 record_type = task_copyfn_remap_type (&tcctx, record_type);
9290 if (srecord_needs_remap)
9291 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9293 else
9294 tcctx.cb.decl_map = NULL;
9296 arg = DECL_ARGUMENTS (child_fn);
9297 TREE_TYPE (arg) = build_pointer_type (record_type);
9298 sarg = DECL_CHAIN (arg);
9299 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9301 /* First pass: initialize temporaries used in record_type and srecord_type
9302 sizes and field offsets. */
9303 if (tcctx.cb.decl_map)
9304 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9305 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9307 tree *p;
9309 decl = OMP_CLAUSE_DECL (c);
9310 p = tcctx.cb.decl_map->get (decl);
9311 if (p == NULL)
9312 continue;
9313 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9314 sf = (tree) n->value;
9315 sf = *tcctx.cb.decl_map->get (sf);
9316 src = build_simple_mem_ref_loc (loc, sarg);
9317 src = omp_build_component_ref (src, sf);
9318 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9319 append_to_statement_list (t, &list);
9322 /* Second pass: copy shared var pointers and copy-construct non-VLA
9323 firstprivate vars. */
9324 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9325 switch (OMP_CLAUSE_CODE (c))
9327 case OMP_CLAUSE_SHARED:
9328 decl = OMP_CLAUSE_DECL (c);
9329 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9330 if (n == NULL)
9331 break;
9332 f = (tree) n->value;
9333 if (tcctx.cb.decl_map)
9334 f = *tcctx.cb.decl_map->get (f);
9335 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9336 sf = (tree) n->value;
9337 if (tcctx.cb.decl_map)
9338 sf = *tcctx.cb.decl_map->get (sf);
9339 src = build_simple_mem_ref_loc (loc, sarg);
9340 src = omp_build_component_ref (src, sf);
9341 dst = build_simple_mem_ref_loc (loc, arg);
9342 dst = omp_build_component_ref (dst, f);
9343 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9344 append_to_statement_list (t, &list);
9345 break;
9346 case OMP_CLAUSE_FIRSTPRIVATE:
9347 decl = OMP_CLAUSE_DECL (c);
9348 if (is_variable_sized (decl))
9349 break;
9350 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9351 if (n == NULL)
9352 break;
9353 f = (tree) n->value;
9354 if (tcctx.cb.decl_map)
9355 f = *tcctx.cb.decl_map->get (f);
9356 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9357 if (n != NULL)
9359 sf = (tree) n->value;
9360 if (tcctx.cb.decl_map)
9361 sf = *tcctx.cb.decl_map->get (sf);
9362 src = build_simple_mem_ref_loc (loc, sarg);
9363 src = omp_build_component_ref (src, sf);
9364 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9365 src = build_simple_mem_ref_loc (loc, src);
9367 else
9368 src = decl;
9369 dst = build_simple_mem_ref_loc (loc, arg);
9370 dst = omp_build_component_ref (dst, f);
9371 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9372 append_to_statement_list (t, &list);
9373 break;
9374 case OMP_CLAUSE_PRIVATE:
9375 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9376 break;
9377 decl = OMP_CLAUSE_DECL (c);
9378 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9379 f = (tree) n->value;
9380 if (tcctx.cb.decl_map)
9381 f = *tcctx.cb.decl_map->get (f);
9382 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9383 if (n != NULL)
9385 sf = (tree) n->value;
9386 if (tcctx.cb.decl_map)
9387 sf = *tcctx.cb.decl_map->get (sf);
9388 src = build_simple_mem_ref_loc (loc, sarg);
9389 src = omp_build_component_ref (src, sf);
9390 if (use_pointer_for_field (decl, NULL))
9391 src = build_simple_mem_ref_loc (loc, src);
9393 else
9394 src = decl;
9395 dst = build_simple_mem_ref_loc (loc, arg);
9396 dst = omp_build_component_ref (dst, f);
9397 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9398 append_to_statement_list (t, &list);
9399 break;
9400 default:
9401 break;
9404 /* Last pass: handle VLA firstprivates. */
9405 if (tcctx.cb.decl_map)
9406 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9407 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9409 tree ind, ptr, df;
9411 decl = OMP_CLAUSE_DECL (c);
9412 if (!is_variable_sized (decl))
9413 continue;
9414 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9415 if (n == NULL)
9416 continue;
9417 f = (tree) n->value;
9418 f = *tcctx.cb.decl_map->get (f);
9419 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9420 ind = DECL_VALUE_EXPR (decl);
9421 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9422 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9423 n = splay_tree_lookup (ctx->sfield_map,
9424 (splay_tree_key) TREE_OPERAND (ind, 0));
9425 sf = (tree) n->value;
9426 sf = *tcctx.cb.decl_map->get (sf);
9427 src = build_simple_mem_ref_loc (loc, sarg);
9428 src = omp_build_component_ref (src, sf);
9429 src = build_simple_mem_ref_loc (loc, src);
9430 dst = build_simple_mem_ref_loc (loc, arg);
9431 dst = omp_build_component_ref (dst, f);
9432 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9433 append_to_statement_list (t, &list);
9434 n = splay_tree_lookup (ctx->field_map,
9435 (splay_tree_key) TREE_OPERAND (ind, 0));
9436 df = (tree) n->value;
9437 df = *tcctx.cb.decl_map->get (df);
9438 ptr = build_simple_mem_ref_loc (loc, arg);
9439 ptr = omp_build_component_ref (ptr, df);
9440 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9441 build_fold_addr_expr_loc (loc, dst));
9442 append_to_statement_list (t, &list);
9445 t = build1 (RETURN_EXPR, void_type_node, NULL);
9446 append_to_statement_list (t, &list);
9448 if (tcctx.cb.decl_map)
9449 delete tcctx.cb.decl_map;
9450 pop_gimplify_context (NULL);
9451 BIND_EXPR_BODY (bind) = list;
9452 pop_cfun ();
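/* For illustration (a sketch, not verbatim output; the record and
   function names below are illustrative only): given

     #pragma omp task firstprivate(x) shared(s)
     foo (x, s);

   the copy function built above behaves roughly like

     void task_copyfn (struct omp_data *dst, struct omp_sdata *src)
     {
       dst->s = src->s;        copy the shared var pointer
       dst->x = src->x;        copy-construct the firstprivate var
     }

   with VLA firstprivates additionally getting their pointer fields
   wired up in the last pass above.  */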
9455 static void
9456 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9458 tree c, clauses;
9459 gimple g;
9460 size_t n_in = 0, n_out = 0, idx = 2, i;
9462 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9463 OMP_CLAUSE_DEPEND);
9464 gcc_assert (clauses);
9465 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9466 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9467 switch (OMP_CLAUSE_DEPEND_KIND (c))
9469 case OMP_CLAUSE_DEPEND_IN:
9470 n_in++;
9471 break;
9472 case OMP_CLAUSE_DEPEND_OUT:
9473 case OMP_CLAUSE_DEPEND_INOUT:
9474 n_out++;
9475 break;
9476 default:
9477 gcc_unreachable ();
9479 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9480 tree array = create_tmp_var (type, NULL);
9481 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9482 NULL_TREE);
9483 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9484 gimple_seq_add_stmt (iseq, g);
9485 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9486 NULL_TREE);
9487 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9488 gimple_seq_add_stmt (iseq, g);
9489 for (i = 0; i < 2; i++)
9491 if ((i ? n_in : n_out) == 0)
9492 continue;
9493 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9494 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9495 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9497 tree t = OMP_CLAUSE_DECL (c);
9498 t = fold_convert (ptr_type_node, t);
9499 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9500 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9501 NULL_TREE, NULL_TREE);
9502 g = gimple_build_assign (r, t);
9503 gimple_seq_add_stmt (iseq, g);
9506 tree *p = gimple_omp_task_clauses_ptr (stmt);
9507 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9508 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9509 OMP_CLAUSE_CHAIN (c) = *p;
9510 *p = c;
9511 tree clobber = build_constructor (type, NULL);
9512 TREE_THIS_VOLATILE (clobber) = 1;
9513 g = gimple_build_assign (array, clobber);
9514 gimple_seq_add_stmt (oseq, g);
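/* For illustration, a sketch of the array layout (with an illustrative
   array name): for

     #pragma omp task depend(in: a) depend(out: b) depend(inout: c)

   the loops above fill a 5-element array roughly as

     deps[0] = 3;     total number of depend addresses
     deps[1] = 2;     number of out/inout addresses
     deps[2] = &b;    out/inout addresses come first
     deps[3] = &c;
     deps[4] = &a;    in addresses follow

   its address is then chained onto the task's clauses via the
   artificial OMP_CLAUSE_DEPEND built above, and the array is
   clobbered in *OSEQ once the task has been emitted.  */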
9517 /* Lower the OpenMP parallel or task directive in the current statement
9518 in GSI_P. CTX holds context information for the directive. */
9520 static void
9521 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9523 tree clauses;
9524 tree child_fn, t;
9525 gimple stmt = gsi_stmt (*gsi_p);
9526 gimple par_bind, bind, dep_bind = NULL;
9527 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9528 location_t loc = gimple_location (stmt);
9530 clauses = gimple_omp_taskreg_clauses (stmt);
9531 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9532 par_body = gimple_bind_body (par_bind);
9533 child_fn = ctx->cb.dst_fn;
9534 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9535 && !gimple_omp_parallel_combined_p (stmt))
9537 struct walk_stmt_info wi;
9538 int ws_num = 0;
9540 memset (&wi, 0, sizeof (wi));
9541 wi.info = &ws_num;
9542 wi.val_only = true;
9543 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9544 if (ws_num == 1)
9545 gimple_omp_parallel_set_combined_p (stmt, true);
9547 gimple_seq dep_ilist = NULL;
9548 gimple_seq dep_olist = NULL;
9549 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9550 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9552 push_gimplify_context ();
9553 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9554 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9557 if (ctx->srecord_type)
9558 create_task_copyfn (stmt, ctx);
9560 push_gimplify_context ();
9562 par_olist = NULL;
9563 par_ilist = NULL;
9564 par_rlist = NULL;
9565 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9566 lower_omp (&par_body, ctx);
9567 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9568 lower_reduction_clauses (clauses, &par_rlist, ctx);
9570 /* Declare all the variables created by mapping and the variables
9571 declared in the scope of the parallel body. */
9572 record_vars_into (ctx->block_vars, child_fn);
9573 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9575 if (ctx->record_type)
9577 ctx->sender_decl
9578 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9579 : ctx->record_type, ".omp_data_o");
9580 DECL_NAMELESS (ctx->sender_decl) = 1;
9581 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9582 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9585 olist = NULL;
9586 ilist = NULL;
9587 lower_send_clauses (clauses, &ilist, &olist, ctx);
9588 lower_send_shared_vars (&ilist, &olist, ctx);
9590 if (ctx->record_type)
9592 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
9593 TREE_THIS_VOLATILE (clobber) = 1;
9594 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9595 clobber));
9598 /* Once all the expansions are done, sequence all the different
9599 fragments inside gimple_omp_body. */
9601 new_body = NULL;
9603 if (ctx->record_type)
9605 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9606 /* fixup_child_record_type might have changed receiver_decl's type. */
9607 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9608 gimple_seq_add_stmt (&new_body,
9609 gimple_build_assign (ctx->receiver_decl, t));
9612 gimple_seq_add_seq (&new_body, par_ilist);
9613 gimple_seq_add_seq (&new_body, par_body);
9614 gimple_seq_add_seq (&new_body, par_rlist);
9615 if (ctx->cancellable)
9616 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
9617 gimple_seq_add_seq (&new_body, par_olist);
9618 new_body = maybe_catch_exception (new_body);
9619 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9620 gimple_omp_set_body (stmt, new_body);
9622 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
9623 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
9624 gimple_bind_add_seq (bind, ilist);
9625 gimple_bind_add_stmt (bind, stmt);
9626 gimple_bind_add_seq (bind, olist);
9628 pop_gimplify_context (NULL);
9630 if (dep_bind)
9632 gimple_bind_add_seq (dep_bind, dep_ilist);
9633 gimple_bind_add_stmt (dep_bind, bind);
9634 gimple_bind_add_seq (dep_bind, dep_olist);
9635 pop_gimplify_context (dep_bind);
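/* For illustration, the statement nesting produced above is roughly

     dep_bind {                 only for tasks with depend clauses
       dep_ilist;               initialize the dependence array
       bind {
         ilist;                 fill the .omp_data_o sender record
         GIMPLE_OMP_PARALLEL/TASK with the rewritten body;
         olist;                 copy-back and clobber of the record
       }
       dep_olist;               clobber of the dependence array
     }

   This is a sketch of the sequencing, not literal dump output.  */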
9639 /* Lower the OpenMP target directive in the current statement
9640 in GSI_P. CTX holds context information for the directive. */
9642 static void
9643 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9645 tree clauses;
9646 tree child_fn, t, c;
9647 gimple stmt = gsi_stmt (*gsi_p);
9648 gimple tgt_bind = NULL, bind;
9649 gimple_seq tgt_body = NULL, olist, ilist, new_body;
9650 location_t loc = gimple_location (stmt);
9651 int kind = gimple_omp_target_kind (stmt);
9652 unsigned int map_cnt = 0;
9654 clauses = gimple_omp_target_clauses (stmt);
9655 if (kind == GF_OMP_TARGET_KIND_REGION)
9657 tgt_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9658 tgt_body = gimple_bind_body (tgt_bind);
9660 else if (kind == GF_OMP_TARGET_KIND_DATA)
9661 tgt_body = gimple_omp_body (stmt);
9662 child_fn = ctx->cb.dst_fn;
9664 push_gimplify_context ();
9666 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9667 switch (OMP_CLAUSE_CODE (c))
9669 tree var, x;
9671 default:
9672 break;
9673 case OMP_CLAUSE_MAP:
9674 case OMP_CLAUSE_TO:
9675 case OMP_CLAUSE_FROM:
9676 var = OMP_CLAUSE_DECL (c);
9677 if (!DECL_P (var))
9679 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
9680 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9681 map_cnt++;
9682 continue;
9685 if (DECL_SIZE (var)
9686 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
9688 tree var2 = DECL_VALUE_EXPR (var);
9689 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
9690 var2 = TREE_OPERAND (var2, 0);
9691 gcc_assert (DECL_P (var2));
9692 var = var2;
9695 if (!maybe_lookup_field (var, ctx))
9696 continue;
9698 if (kind == GF_OMP_TARGET_KIND_REGION)
9700 x = build_receiver_ref (var, true, ctx);
9701 tree new_var = lookup_decl (var, ctx);
9702 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9703 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9704 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9705 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
9706 x = build_simple_mem_ref (x);
9707 SET_DECL_VALUE_EXPR (new_var, x);
9708 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
9710 map_cnt++;
9713 if (kind == GF_OMP_TARGET_KIND_REGION)
9715 target_nesting_level++;
9716 lower_omp (&tgt_body, ctx);
9717 target_nesting_level--;
9719 else if (kind == GF_OMP_TARGET_KIND_DATA)
9720 lower_omp (&tgt_body, ctx);
9722 if (kind == GF_OMP_TARGET_KIND_REGION)
9724 /* Declare all the variables created by mapping and the variables
9725 declared in the scope of the target body. */
9726 record_vars_into (ctx->block_vars, child_fn);
9727 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
9730 olist = NULL;
9731 ilist = NULL;
9732 if (ctx->record_type)
9734 ctx->sender_decl
9735 = create_tmp_var (ctx->record_type, ".omp_data_arr");
9736 DECL_NAMELESS (ctx->sender_decl) = 1;
9737 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9738 t = make_tree_vec (3);
9739 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
9740 TREE_VEC_ELT (t, 1)
9741 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
9742 ".omp_data_sizes");
9743 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
9744 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
9745 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
9746 TREE_VEC_ELT (t, 2)
9747 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
9748 map_cnt),
9749 ".omp_data_kinds");
9750 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
9751 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
9752 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
9753 gimple_omp_target_set_data_arg (stmt, t);
9755 vec<constructor_elt, va_gc> *vsize;
9756 vec<constructor_elt, va_gc> *vkind;
9757 vec_alloc (vsize, map_cnt);
9758 vec_alloc (vkind, map_cnt);
9759 unsigned int map_idx = 0;
9761 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9762 switch (OMP_CLAUSE_CODE (c))
9764 tree ovar, nc;
9766 default:
9767 break;
9768 case OMP_CLAUSE_MAP:
9769 case OMP_CLAUSE_TO:
9770 case OMP_CLAUSE_FROM:
9771 nc = c;
9772 ovar = OMP_CLAUSE_DECL (c);
9773 if (!DECL_P (ovar))
9775 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9776 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9778 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
9779 == get_base_address (ovar));
9780 nc = OMP_CLAUSE_CHAIN (c);
9781 ovar = OMP_CLAUSE_DECL (nc);
9783 else
9785 tree x = build_sender_ref (ovar, ctx);
9786 tree v
9787 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
9788 gimplify_assign (x, v, &ilist);
9789 nc = NULL_TREE;
9792 else
9794 if (DECL_SIZE (ovar)
9795 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
9797 tree ovar2 = DECL_VALUE_EXPR (ovar);
9798 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
9799 ovar2 = TREE_OPERAND (ovar2, 0);
9800 gcc_assert (DECL_P (ovar2));
9801 ovar = ovar2;
9803 if (!maybe_lookup_field (ovar, ctx))
9804 continue;
9807 if (nc)
9809 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
9810 tree x = build_sender_ref (ovar, ctx);
9811 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9812 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9813 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9814 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
9816 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9817 tree avar
9818 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
9819 mark_addressable (avar);
9820 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
9821 avar = build_fold_addr_expr (avar);
9822 gimplify_assign (x, avar, &ilist);
9824 else if (is_gimple_reg (var))
9826 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9827 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
9828 mark_addressable (avar);
9829 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
9830 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
9831 gimplify_assign (avar, var, &ilist);
9832 avar = build_fold_addr_expr (avar);
9833 gimplify_assign (x, avar, &ilist);
9834 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
9835 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
9836 && !TYPE_READONLY (TREE_TYPE (var)))
9838 x = build_sender_ref (ovar, ctx);
9839 x = build_simple_mem_ref (x);
9840 gimplify_assign (var, x, &olist);
9843 else
9845 var = build_fold_addr_expr (var);
9846 gimplify_assign (x, var, &ilist);
9849 tree s = OMP_CLAUSE_SIZE (c);
9850 if (s == NULL_TREE)
9851 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
9852 s = fold_convert (size_type_node, s);
9853 tree purpose = size_int (map_idx++);
9854 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
9855 if (TREE_CODE (s) != INTEGER_CST)
9856 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
9858 unsigned char tkind = 0;
9859 switch (OMP_CLAUSE_CODE (c))
9861 case OMP_CLAUSE_MAP:
9862 tkind = OMP_CLAUSE_MAP_KIND (c);
9863 break;
9864 case OMP_CLAUSE_TO:
9865 tkind = OMP_CLAUSE_MAP_TO;
9866 break;
9867 case OMP_CLAUSE_FROM:
9868 tkind = OMP_CLAUSE_MAP_FROM;
9869 break;
9870 default:
9871 gcc_unreachable ();
9873 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
9874 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
9875 talign = DECL_ALIGN_UNIT (ovar);
9876 talign = ceil_log2 (talign);
9877 tkind |= talign << 3;
9878 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
9879 build_int_cst (unsigned_char_type_node,
9880 tkind));
9881 if (nc && nc != c)
9882 c = nc;
9885 gcc_assert (map_idx == map_cnt);
9887 DECL_INITIAL (TREE_VEC_ELT (t, 1))
9888 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
9889 DECL_INITIAL (TREE_VEC_ELT (t, 2))
9890 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
9891 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
9893 gimple_seq initlist = NULL;
9894 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
9895 TREE_VEC_ELT (t, 1)),
9896 &initlist, true, NULL_TREE);
9897 gimple_seq_add_seq (&ilist, initlist);
9899 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
9900 NULL);
9901 TREE_THIS_VOLATILE (clobber) = 1;
9902 gimple_seq_add_stmt (&olist,
9903 gimple_build_assign (TREE_VEC_ELT (t, 1),
9904 clobber));
9907 tree clobber = build_constructor (ctx->record_type, NULL);
9908 TREE_THIS_VOLATILE (clobber) = 1;
9909 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9910 clobber));
9913 /* Once all the expansions are done, sequence all the different
9914 fragments inside gimple_omp_body. */
9916 new_body = NULL;
9918 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
9920 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9921 /* fixup_child_record_type might have changed receiver_decl's type. */
9922 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9923 gimple_seq_add_stmt (&new_body,
9924 gimple_build_assign (ctx->receiver_decl, t));
9927 if (kind == GF_OMP_TARGET_KIND_REGION)
9929 gimple_seq_add_seq (&new_body, tgt_body);
9930 new_body = maybe_catch_exception (new_body);
9932 else if (kind == GF_OMP_TARGET_KIND_DATA)
9933 new_body = tgt_body;
9934 if (kind != GF_OMP_TARGET_KIND_UPDATE)
9936 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9937 gimple_omp_set_body (stmt, new_body);
9940 bind = gimple_build_bind (NULL, NULL,
9941 tgt_bind ? gimple_bind_block (tgt_bind)
9942 : NULL_TREE);
9943 gsi_replace (gsi_p, bind, true);
9944 gimple_bind_add_seq (bind, ilist);
9945 gimple_bind_add_stmt (bind, stmt);
9946 gimple_bind_add_seq (bind, olist);
9948 pop_gimplify_context (NULL);
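/* For illustration (a sketch with illustrative variable names): for

     #pragma omp target map(tofrom: a)
     a++;

   the lowering above conceptually emits, ahead of the target stmt,

     .omp_data_arr.a = &a;
     .omp_data_sizes = { sizeof (a) };
     .omp_data_kinds = { OMP_CLAUSE_MAP_TOFROM | (align_log2 << 3) };

   and hands the three temporaries to the runtime via the TREE_VEC
   installed by gimple_omp_target_set_data_arg above.  */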
9951 /* Expand code for an OpenMP teams directive. */
9953 static void
9954 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9956 gimple teams_stmt = gsi_stmt (*gsi_p);
9957 push_gimplify_context ();
9959 tree block = make_node (BLOCK);
9960 gimple bind = gimple_build_bind (NULL, NULL, block);
9961 gsi_replace (gsi_p, bind, true);
9962 gimple_seq bind_body = NULL;
9963 gimple_seq dlist = NULL;
9964 gimple_seq olist = NULL;
9966 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9967 OMP_CLAUSE_NUM_TEAMS);
9968 if (num_teams == NULL_TREE)
9969 num_teams = build_int_cst (unsigned_type_node, 0);
9970 else
9972 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
9973 num_teams = fold_convert (unsigned_type_node, num_teams);
9974 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
9976 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9977 OMP_CLAUSE_THREAD_LIMIT);
9978 if (thread_limit == NULL_TREE)
9979 thread_limit = build_int_cst (unsigned_type_node, 0);
9980 else
9982 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
9983 thread_limit = fold_convert (unsigned_type_node, thread_limit);
9984 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
9985 fb_rvalue);
9988 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
9989 &bind_body, &dlist, ctx, NULL);
9990 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
9991 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
9992 gimple_seq_add_stmt (&bind_body, teams_stmt);
9994 location_t loc = gimple_location (teams_stmt);
9995 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
9996 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
9997 gimple_set_location (call, loc);
9998 gimple_seq_add_stmt (&bind_body, call);
10000 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
10001 gimple_omp_set_body (teams_stmt, NULL);
10002 gimple_seq_add_seq (&bind_body, olist);
10003 gimple_seq_add_seq (&bind_body, dlist);
10004 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
10005 gimple_bind_set_body (bind, bind_body);
10007 pop_gimplify_context (bind);
10009 gimple_bind_append_vars (bind, ctx->block_vars);
10010 BLOCK_VARS (block) = ctx->block_vars;
10011 if (BLOCK_VARS (block))
10012 TREE_USED (block) = 1;
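/* For illustration, a sketch of the result for

     #pragma omp teams num_teams(4) thread_limit(8)
     body;

   where the bind built above roughly contains

     GIMPLE_OMP_TEAMS <clauses>
     __builtin_GOMP_teams (4, 8);
     body (lowered);
     reduction and destructor sequences;
     GIMPLE_OMP_RETURN

   with an absent clause contributing 0, meaning the runtime picks
   the value.  */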
10016 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
10017 regimplified. If DATA is non-NULL, lower_omp_1 is being called
10018 outside of an OpenMP context, but with task_shared_vars set. */
10020 static tree
10021 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
10022 void *data)
10024 tree t = *tp;
10026 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
10027 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
10028 return t;
10030 if (task_shared_vars
10031 && DECL_P (t)
10032 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
10033 return t;
10035 /* If a global variable has been privatized, TREE_CONSTANT on
10036 ADDR_EXPR might be wrong. */
10037 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
10038 recompute_tree_invariant_for_addr_expr (t);
10040 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
10041 return NULL_TREE;
10044 static void
10045 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10047 gimple stmt = gsi_stmt (*gsi_p);
10048 struct walk_stmt_info wi;
10050 if (gimple_has_location (stmt))
10051 input_location = gimple_location (stmt);
10053 if (task_shared_vars)
10054 memset (&wi, '\0', sizeof (wi));
10056 /* If we have issued syntax errors, avoid doing any heavy lifting.
10057 Just replace the OpenMP directives with a NOP to avoid
10058 confusing RTL expansion. */
10059 if (seen_error () && is_gimple_omp (stmt))
10061 gsi_replace (gsi_p, gimple_build_nop (), true);
10062 return;
10065 switch (gimple_code (stmt))
10067 case GIMPLE_COND:
10068 if ((ctx || task_shared_vars)
10069 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
10070 ctx ? NULL : &wi, NULL)
10071 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
10072 ctx ? NULL : &wi, NULL)))
10073 gimple_regimplify_operands (stmt, gsi_p);
10074 break;
10075 case GIMPLE_CATCH:
10076 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
10077 break;
10078 case GIMPLE_EH_FILTER:
10079 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
10080 break;
10081 case GIMPLE_TRY:
10082 lower_omp (gimple_try_eval_ptr (stmt), ctx);
10083 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
10084 break;
10085 case GIMPLE_TRANSACTION:
10086 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
10087 break;
10088 case GIMPLE_BIND:
10089 lower_omp (gimple_bind_body_ptr (stmt), ctx);
10090 break;
10091 case GIMPLE_OMP_PARALLEL:
10092 case GIMPLE_OMP_TASK:
10093 ctx = maybe_lookup_ctx (stmt);
10094 gcc_assert (ctx);
10095 if (ctx->cancellable)
10096 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10097 lower_omp_taskreg (gsi_p, ctx);
10098 break;
10099 case GIMPLE_OMP_FOR:
10100 ctx = maybe_lookup_ctx (stmt);
10101 gcc_assert (ctx);
10102 if (ctx->cancellable)
10103 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10104 lower_omp_for (gsi_p, ctx);
10105 break;
10106 case GIMPLE_OMP_SECTIONS:
10107 ctx = maybe_lookup_ctx (stmt);
10108 gcc_assert (ctx);
10109 if (ctx->cancellable)
10110 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10111 lower_omp_sections (gsi_p, ctx);
10112 break;
10113 case GIMPLE_OMP_SINGLE:
10114 ctx = maybe_lookup_ctx (stmt);
10115 gcc_assert (ctx);
10116 lower_omp_single (gsi_p, ctx);
10117 break;
10118 case GIMPLE_OMP_MASTER:
10119 ctx = maybe_lookup_ctx (stmt);
10120 gcc_assert (ctx);
10121 lower_omp_master (gsi_p, ctx);
10122 break;
10123 case GIMPLE_OMP_TASKGROUP:
10124 ctx = maybe_lookup_ctx (stmt);
10125 gcc_assert (ctx);
10126 lower_omp_taskgroup (gsi_p, ctx);
10127 break;
10128 case GIMPLE_OMP_ORDERED:
10129 ctx = maybe_lookup_ctx (stmt);
10130 gcc_assert (ctx);
10131 lower_omp_ordered (gsi_p, ctx);
10132 break;
10133 case GIMPLE_OMP_CRITICAL:
10134 ctx = maybe_lookup_ctx (stmt);
10135 gcc_assert (ctx);
10136 lower_omp_critical (gsi_p, ctx);
10137 break;
10138 case GIMPLE_OMP_ATOMIC_LOAD:
10139 if ((ctx || task_shared_vars)
10140 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
10141 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
10142 gimple_regimplify_operands (stmt, gsi_p);
10143 break;
10144 case GIMPLE_OMP_TARGET:
10145 ctx = maybe_lookup_ctx (stmt);
10146 gcc_assert (ctx);
10147 lower_omp_target (gsi_p, ctx);
10148 break;
10149 case GIMPLE_OMP_TEAMS:
10150 ctx = maybe_lookup_ctx (stmt);
10151 gcc_assert (ctx);
10152 lower_omp_teams (gsi_p, ctx);
10153 break;
10154 case GIMPLE_CALL:
10155 tree fndecl;
10156 fndecl = gimple_call_fndecl (stmt);
10157 if (fndecl
10158 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10159 switch (DECL_FUNCTION_CODE (fndecl))
10161 case BUILT_IN_GOMP_BARRIER:
10162 if (ctx == NULL)
10163 break;
10164 /* FALLTHRU */
10165 case BUILT_IN_GOMP_CANCEL:
10166 case BUILT_IN_GOMP_CANCELLATION_POINT:
10167 omp_context *cctx;
10168 cctx = ctx;
10169 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10170 cctx = cctx->outer;
10171 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
10172 if (!cctx->cancellable)
10174 if (DECL_FUNCTION_CODE (fndecl)
10175 == BUILT_IN_GOMP_CANCELLATION_POINT)
10177 stmt = gimple_build_nop ();
10178 gsi_replace (gsi_p, stmt, false);
10180 break;
10182 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10184 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10185 gimple_call_set_fndecl (stmt, fndecl);
10186 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10188 tree lhs;
10189 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)), NULL);
10190 gimple_call_set_lhs (stmt, lhs);
10191 tree fallthru_label;
10192 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10193 gimple g;
10194 g = gimple_build_label (fallthru_label);
10195 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10196 g = gimple_build_cond (NE_EXPR, lhs,
10197 fold_convert (TREE_TYPE (lhs),
10198 boolean_false_node),
10199 cctx->cancel_label, fallthru_label);
10200 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10201 break;
10202 default:
10203 break;
10205 /* FALLTHRU */
10206 default:
10207 if ((ctx || task_shared_vars)
10208 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10209 ctx ? NULL : &wi))
10211 /* Just remove clobbers; this should happen only if we have
10212 "privatized" local addressable variables in SIMD regions.
10213 The clobber isn't needed in that case, and gimplifying the
10214 address of the ARRAY_REF into a pointer and creating a
10215 MEM_REF-based clobber would create worse code than we get
10216 with the clobber dropped. */
10217 if (gimple_clobber_p (stmt))
10219 gsi_replace (gsi_p, gimple_build_nop (), true);
10220 break;
10222 gimple_regimplify_operands (stmt, gsi_p);
10224 break;
10228 static void
10229 lower_omp (gimple_seq *body, omp_context *ctx)
10231 location_t saved_location = input_location;
10232 gimple_stmt_iterator gsi;
10233 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10234 lower_omp_1 (&gsi, ctx);
10235 /* During gimplification, we have not always invoked fold_stmt
10236 (gimplify.c:maybe_fold_stmt); call it now. */
10237 if (target_nesting_level)
10238 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10239 fold_stmt (&gsi);
10240 input_location = saved_location;
10243 /* Main entry point. */
10245 static unsigned int
10246 execute_lower_omp (void)
10248 gimple_seq body;
10250 /* This pass always runs, to provide PROP_gimple_lomp.
10251 But there is nothing to do unless -fopenmp, -fopenmp-simd or -fcilkplus is given. */
10252 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10253 return 0;
10255 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10256 delete_omp_context);
10258 body = gimple_body (current_function_decl);
10259 scan_omp (&body, NULL);
10260 gcc_assert (taskreg_nesting_level == 0);
10262 if (all_contexts->root)
10264 if (task_shared_vars)
10265 push_gimplify_context ();
10266 lower_omp (&body, NULL);
10267 if (task_shared_vars)
10268 pop_gimplify_context (NULL);
10271 if (all_contexts)
10273 splay_tree_delete (all_contexts);
10274 all_contexts = NULL;
10276 BITMAP_FREE (task_shared_vars);
10277 return 0;
10280 namespace {
10282 const pass_data pass_data_lower_omp =
10284 GIMPLE_PASS, /* type */
10285 "omplower", /* name */
10286 OPTGROUP_NONE, /* optinfo_flags */
10287 TV_NONE, /* tv_id */
10288 PROP_gimple_any, /* properties_required */
10289 PROP_gimple_lomp, /* properties_provided */
10290 0, /* properties_destroyed */
10291 0, /* todo_flags_start */
10292 0, /* todo_flags_finish */
10295 class pass_lower_omp : public gimple_opt_pass
10297 public:
10298 pass_lower_omp (gcc::context *ctxt)
10299 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10302 /* opt_pass methods: */
10303 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10305 }; // class pass_lower_omp
10307 } // anon namespace
10309 gimple_opt_pass *
10310 make_pass_lower_omp (gcc::context *ctxt)
10312 return new pass_lower_omp (ctxt);
10315 /* The following is a utility to diagnose OpenMP structured block violations.
10316 It is not part of the "omplower" pass, as that's invoked too late. It
10317 should be invoked by the respective front ends after gimplification. */
10319 static splay_tree all_labels;
10321 /* Check for mismatched contexts and generate an error if needed. Return
10322 true if an error is detected. */
10324 static bool
10325 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10326 gimple branch_ctx, gimple label_ctx)
10328 if (label_ctx == branch_ctx)
10329 return false;
10333 Previously we kept track of the label's entire context in diagnose_sb_[12]
10334 so we could traverse it and issue a correct "exit" or "enter" error
10335 message upon a structured block violation.
10337 We built the context by building a list with tree_cons'ing, but there is
10338 no easy counterpart in gimple tuples. It seems like far too much work
10339 for issuing exit/enter error messages. If someone really misses the
10340 distinct error message... patches welcome.
10343 #if 0
10344 /* Try to avoid confusing the user by producing an error message
10345 with correct "exit" or "enter" verbiage. We prefer "exit"
10346 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10347 if (branch_ctx == NULL)
10348 exit_p = false;
10349 else
10351 while (label_ctx)
10353 if (TREE_VALUE (label_ctx) == branch_ctx)
10355 exit_p = false;
10356 break;
10358 label_ctx = TREE_CHAIN (label_ctx);
10362 if (exit_p)
10363 error ("invalid exit from OpenMP structured block");
10364 else
10365 error ("invalid entry to OpenMP structured block");
10366 #endif
10368 bool cilkplus_block = false;
10369 if (flag_cilkplus)
10371 if ((branch_ctx
10372 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10373 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10374 || (label_ctx
10375 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10376 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10377 cilkplus_block = true;
10380 /* If it's obvious we have an invalid entry, be specific about the error. */
10381 if (branch_ctx == NULL)
10383 if (cilkplus_block)
10384 error ("invalid entry to Cilk Plus structured block");
10385 else
10386 error ("invalid entry to OpenMP structured block");
10388 else
10390 /* Otherwise, be vague and lazy, but efficient. */
10391 if (cilkplus_block)
10392 error ("invalid branch to/from a Cilk Plus structured block");
10393 else
10394 error ("invalid branch to/from an OpenMP structured block");
10397 gsi_replace (gsi_p, gimple_build_nop (), false);
10398 return true;
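/* For illustration, the diagnostic above triggers on code such as

     goto l;                     branch_ctx == NULL here
   #pragma omp parallel
     { l:; }                     label_ctx is the parallel stmt

   yielding "invalid entry to OpenMP structured block"; a jump in the
   other direction produces the vaguer to/from message instead.  */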
10401 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10402 where each label is found. */
10404 static tree
10405 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10406 struct walk_stmt_info *wi)
10408 gimple context = (gimple) wi->info;
10409 gimple inner_context;
10410 gimple stmt = gsi_stmt (*gsi_p);
10412 *handled_ops_p = true;
10414 switch (gimple_code (stmt))
10416 WALK_SUBSTMTS;
10418 case GIMPLE_OMP_PARALLEL:
10419 case GIMPLE_OMP_TASK:
10420 case GIMPLE_OMP_SECTIONS:
10421 case GIMPLE_OMP_SINGLE:
10422 case GIMPLE_OMP_SECTION:
10423 case GIMPLE_OMP_MASTER:
10424 case GIMPLE_OMP_ORDERED:
10425 case GIMPLE_OMP_CRITICAL:
10426 case GIMPLE_OMP_TARGET:
10427 case GIMPLE_OMP_TEAMS:
10428 case GIMPLE_OMP_TASKGROUP:
10429 /* The minimal context here is just the current OMP construct. */
10430 inner_context = stmt;
10431 wi->info = inner_context;
10432 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10433 wi->info = context;
10434 break;
10436 case GIMPLE_OMP_FOR:
10437 inner_context = stmt;
10438 wi->info = inner_context;
10439 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10440 walk them. */
10441 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10442 diagnose_sb_1, NULL, wi);
10443 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10444 wi->info = context;
10445 break;
10447 case GIMPLE_LABEL:
10448 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
10449 (splay_tree_value) context);
10450 break;
10452 default:
10453 break;
10456 return NULL_TREE;
10459 /* Pass 2: Check each branch and see if its context differs from that of
10460 the destination label's context. */
10462 static tree
10463 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10464 struct walk_stmt_info *wi)
10466 gimple context = (gimple) wi->info;
10467 splay_tree_node n;
10468 gimple stmt = gsi_stmt (*gsi_p);
10470 *handled_ops_p = true;
10472 switch (gimple_code (stmt))
10474 WALK_SUBSTMTS;
10476 case GIMPLE_OMP_PARALLEL:
10477 case GIMPLE_OMP_TASK:
10478 case GIMPLE_OMP_SECTIONS:
10479 case GIMPLE_OMP_SINGLE:
10480 case GIMPLE_OMP_SECTION:
10481 case GIMPLE_OMP_MASTER:
10482 case GIMPLE_OMP_ORDERED:
10483 case GIMPLE_OMP_CRITICAL:
10484 case GIMPLE_OMP_TARGET:
10485 case GIMPLE_OMP_TEAMS:
10486 case GIMPLE_OMP_TASKGROUP:
10487 wi->info = stmt;
10488 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10489 wi->info = context;
10490 break;
10492 case GIMPLE_OMP_FOR:
10493 wi->info = stmt;
10494 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10495 walk them. */
10496 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10497 diagnose_sb_2, NULL, wi);
10498 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10499 wi->info = context;
10500 break;
10502 case GIMPLE_COND:
10504 tree lab = gimple_cond_true_label (stmt);
10505 if (lab)
10507 n = splay_tree_lookup (all_labels,
10508 (splay_tree_key) lab);
10509 diagnose_sb_0 (gsi_p, context,
10510 n ? (gimple) n->value : NULL);
10512 lab = gimple_cond_false_label (stmt);
10513 if (lab)
10515 n = splay_tree_lookup (all_labels,
10516 (splay_tree_key) lab);
10517 diagnose_sb_0 (gsi_p, context,
10518 n ? (gimple) n->value : NULL);
10521 break;
10523 case GIMPLE_GOTO:
10525 tree lab = gimple_goto_dest (stmt);
10526 if (TREE_CODE (lab) != LABEL_DECL)
10527 break;
10529 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10530 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10532 break;
10534 case GIMPLE_SWITCH:
10536 unsigned int i;
10537 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
10539 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
10540 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10541 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10542 break;
10545 break;
10547 case GIMPLE_RETURN:
10548 diagnose_sb_0 (gsi_p, context, NULL);
10549 break;
10551 default:
10552 break;
10555 return NULL_TREE;
10558 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10559 codes. */
10560 bool
10561 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
10562 int *region_idx)
10564 gimple last = last_stmt (bb);
10565 enum gimple_code code = gimple_code (last);
10566 struct omp_region *cur_region = *region;
10567 bool fallthru = false;
10569 switch (code)
10571 case GIMPLE_OMP_PARALLEL:
10572 case GIMPLE_OMP_TASK:
10573 case GIMPLE_OMP_FOR:
10574 case GIMPLE_OMP_SINGLE:
10575 case GIMPLE_OMP_TEAMS:
10576 case GIMPLE_OMP_MASTER:
10577 case GIMPLE_OMP_TASKGROUP:
10578 case GIMPLE_OMP_ORDERED:
10579 case GIMPLE_OMP_CRITICAL:
10580 case GIMPLE_OMP_SECTION:
10581 cur_region = new_omp_region (bb, code, cur_region);
10582 fallthru = true;
10583 break;
10585 case GIMPLE_OMP_TARGET:
10586 cur_region = new_omp_region (bb, code, cur_region);
10587 fallthru = true;
10588 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
10589 cur_region = cur_region->outer;
10590 break;
10592 case GIMPLE_OMP_SECTIONS:
10593 cur_region = new_omp_region (bb, code, cur_region);
10594 fallthru = true;
10595 break;
10597 case GIMPLE_OMP_SECTIONS_SWITCH:
10598 fallthru = false;
10599 break;
10601 case GIMPLE_OMP_ATOMIC_LOAD:
10602 case GIMPLE_OMP_ATOMIC_STORE:
10603 fallthru = true;
10604 break;
10606 case GIMPLE_OMP_RETURN:
10607 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
10608 somewhere other than the next block. This will be
10609 created later. */
10610 cur_region->exit = bb;
10611 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
10612 cur_region = cur_region->outer;
10613 break;
10615 case GIMPLE_OMP_CONTINUE:
10616 cur_region->cont = bb;
10617 switch (cur_region->type)
10619 case GIMPLE_OMP_FOR:
10620 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
10621 successor edges as abnormal to prevent splitting
10622 them. */
10623 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
10624 /* Make the loopback edge. */
10625 make_edge (bb, single_succ (cur_region->entry),
10626 EDGE_ABNORMAL);
10628 /* Create an edge from GIMPLE_OMP_FOR to exit, which
10629 corresponds to the case that the body of the loop
10630 is not executed at all. */
10631 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
10632 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
10633 fallthru = false;
10634 break;
10636 case GIMPLE_OMP_SECTIONS:
10637 /* Wire up the edges into and out of the nested sections. */
10639 basic_block switch_bb = single_succ (cur_region->entry);
10641 struct omp_region *i;
10642 for (i = cur_region->inner; i ; i = i->next)
10644 gcc_assert (i->type == GIMPLE_OMP_SECTION);
10645 make_edge (switch_bb, i->entry, 0);
10646 make_edge (i->exit, bb, EDGE_FALLTHRU);
10649 /* Make the loopback edge to the block with
10650 GIMPLE_OMP_SECTIONS_SWITCH. */
10651 make_edge (bb, switch_bb, 0);
10653 /* Make the edge from the switch to exit. */
10654 make_edge (switch_bb, bb->next_bb, 0);
10655 fallthru = false;
10657 break;
10659 default:
10660 gcc_unreachable ();
10662 break;
10664 default:
10665 gcc_unreachable ();
10668 if (*region != cur_region)
10670 *region = cur_region;
10671 if (cur_region)
10672 *region_idx = cur_region->entry->index;
10673 else
10674 *region_idx = 0;
10677 return fallthru;
10680 static unsigned int
10681 diagnose_omp_structured_block_errors (void)
10683 struct walk_stmt_info wi;
10684 gimple_seq body = gimple_body (current_function_decl);
10686 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
10688 memset (&wi, 0, sizeof (wi));
10689 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
10691 memset (&wi, 0, sizeof (wi));
10692 wi.want_locations = true;
10693 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
10695 gimple_set_body (current_function_decl, body);
10697 splay_tree_delete (all_labels);
10698 all_labels = NULL;
10700 return 0;
10703 namespace {
10705 const pass_data pass_data_diagnose_omp_blocks =
10707 GIMPLE_PASS, /* type */
10708 "*diagnose_omp_blocks", /* name */
10709 OPTGROUP_NONE, /* optinfo_flags */
10710 TV_NONE, /* tv_id */
10711 PROP_gimple_any, /* properties_required */
10712 0, /* properties_provided */
10713 0, /* properties_destroyed */
10714 0, /* todo_flags_start */
10715 0, /* todo_flags_finish */
10718 class pass_diagnose_omp_blocks : public gimple_opt_pass
10720 public:
10721 pass_diagnose_omp_blocks (gcc::context *ctxt)
10722 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
10725 /* opt_pass methods: */
10726 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
10727 virtual unsigned int execute (function *)
10729 return diagnose_omp_structured_block_errors ();
10732 }; // class pass_diagnose_omp_blocks
10734 } // anon namespace
10736 gimple_opt_pass *
10737 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
10739 return new pass_diagnose_omp_blocks (ctxt);
10742 /* SIMD clone supporting code. */
10744 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
10745 of arguments to reserve space for. */
10747 static struct cgraph_simd_clone *
10748 simd_clone_struct_alloc (int nargs)
10750 struct cgraph_simd_clone *clone_info;
10751 size_t len = (sizeof (struct cgraph_simd_clone)
10752 + nargs * sizeof (struct cgraph_simd_clone_arg));
10753 clone_info = (struct cgraph_simd_clone *)
10754 ggc_internal_cleared_alloc (len);
10755 return clone_info;
10758 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
10760 static inline void
10761 simd_clone_struct_copy (struct cgraph_simd_clone *to,
10762 struct cgraph_simd_clone *from)
10764 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
10765 + ((from->nargs - from->inbranch)
10766 * sizeof (struct cgraph_simd_clone_arg))));
10769 /* Return vector of parameter types of function FNDECL. This uses
10770 TYPE_ARG_TYPES if available, otherwise falls back to the types
10771 of the DECL_ARGUMENTS.
10773 vec<tree>
10774 simd_clone_vector_of_formal_parm_types (tree fndecl)
10776 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
10777 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
10778 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
10779 unsigned int i;
10780 tree arg;
10781 FOR_EACH_VEC_ELT (args, i, arg)
10782 args[i] = TREE_TYPE (args[i]);
10783 return args;
10786 /* Given a simd function in NODE, extract the simd specific
10787 information from the OMP clauses passed in CLAUSES, and return
10788 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
10789 is set to TRUE if the `inbranch' or `notinbranch' clause was specified,
10790 otherwise it is set to FALSE. */
10792 static struct cgraph_simd_clone *
10793 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
10794 bool *inbranch_specified)
10796 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
10797 tree t;
10798 int n;
10799 *inbranch_specified = false;
10801 n = args.length ();
10802 if (n > 0 && args.last () == void_type_node)
10803 n--;
10805 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
10806 be cloned have a distinctive artificial attribute in addition to "omp
10807 declare simd". */
10808 bool cilk_clone
10809 = (flag_cilkplus
10810 && lookup_attribute ("cilk simd function",
10811 DECL_ATTRIBUTES (node->decl)));
10813 /* Allocate one more than needed just in case this is an in-branch
10814 clone which will require a mask argument. */
10815 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
10816 clone_info->nargs = n;
10817 clone_info->cilk_elemental = cilk_clone;
10819 if (!clauses)
10821 args.release ();
10822 return clone_info;
10824 clauses = TREE_VALUE (clauses);
10825 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
10826 return clone_info;
10828 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
10830 switch (OMP_CLAUSE_CODE (t))
10832 case OMP_CLAUSE_INBRANCH:
10833 clone_info->inbranch = 1;
10834 *inbranch_specified = true;
10835 break;
10836 case OMP_CLAUSE_NOTINBRANCH:
10837 clone_info->inbranch = 0;
10838 *inbranch_specified = true;
10839 break;
10840 case OMP_CLAUSE_SIMDLEN:
10841 clone_info->simdlen
10842 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
10843 break;
10844 case OMP_CLAUSE_LINEAR:
10846 tree decl = OMP_CLAUSE_DECL (t);
10847 tree step = OMP_CLAUSE_LINEAR_STEP (t);
10848 int argno = TREE_INT_CST_LOW (decl);
10849 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
10851 clone_info->args[argno].arg_type
10852 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
10853 clone_info->args[argno].linear_step = tree_to_shwi (step);
10854 gcc_assert (clone_info->args[argno].linear_step >= 0
10855 && clone_info->args[argno].linear_step < n);
10857 else
10859 if (POINTER_TYPE_P (args[argno]))
10860 step = fold_convert (ssizetype, step);
10861 if (!tree_fits_shwi_p (step))
10863 warning_at (OMP_CLAUSE_LOCATION (t), 0,
10864 "ignoring large linear step");
10865 args.release ();
10866 return NULL;
10868 else if (integer_zerop (step))
10870 warning_at (OMP_CLAUSE_LOCATION (t), 0,
10871 "ignoring zero linear step");
10872 args.release ();
10873 return NULL;
10875 else
10877 clone_info->args[argno].arg_type
10878 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
10879 clone_info->args[argno].linear_step = tree_to_shwi (step);
10882 break;
10884 case OMP_CLAUSE_UNIFORM:
10886 tree decl = OMP_CLAUSE_DECL (t);
10887 int argno = tree_to_uhwi (decl);
10888 clone_info->args[argno].arg_type
10889 = SIMD_CLONE_ARG_TYPE_UNIFORM;
10890 break;
10892 case OMP_CLAUSE_ALIGNED:
10894 tree decl = OMP_CLAUSE_DECL (t);
10895 int argno = tree_to_uhwi (decl);
10896 clone_info->args[argno].alignment
10897 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
10898 break;
10900 default:
10901 break;
10904 args.release ();
10905 return clone_info;
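/* For illustration, a sketch of the extraction for

     #pragma omp declare simd simdlen(8) uniform(n) linear(i:1) notinbranch
     int foo (int n, int i, float x);

   which roughly yields

     clone_info->simdlen = 8;  clone_info->inbranch = 0;
     args[0].arg_type = SIMD_CLONE_ARG_TYPE_UNIFORM;
     args[1].arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
     args[1].linear_step = 1;
     args[2] keeps the default SIMD_CLONE_ARG_TYPE_VECTOR;

   and sets *INBRANCH_SPECIFIED to true.  */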
10908 /* Given a SIMD clone in NODE, calculate the characteristic data
10909 type and return the corresponding type. The characteristic data
10910 type is computed as described in the Intel Vector ABI. */
10912 static tree
10913 simd_clone_compute_base_data_type (struct cgraph_node *node,
10914 struct cgraph_simd_clone *clone_info)
10916 tree type = integer_type_node;
10917 tree fndecl = node->decl;
10919 /* a) For a non-void function, the characteristic data type is the
10920 return type. */
10921 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
10922 type = TREE_TYPE (TREE_TYPE (fndecl));
10924 /* b) If the function has any non-uniform, non-linear parameters,
10925 then the characteristic data type is the type of the first
10926 such parameter. */
10927 else
10929 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
10930 for (unsigned int i = 0; i < clone_info->nargs; ++i)
10931 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
10933 type = map[i];
10934 break;
10936 map.release ();
10939 /* c) If the characteristic data type determined by a) or b) above
10940 is a struct, union, or class type which is passed by value (except
10941 for the type that maps to the built-in complex data type), the
10942 characteristic data type is int. */
10943 if (RECORD_OR_UNION_TYPE_P (type)
10944 && !aggregate_value_p (type, NULL)
10945 && TREE_CODE (type) != COMPLEX_TYPE)
10946 return integer_type_node;
10948 /* d) If none of the above three classes is applicable, the
10949 characteristic data type is int. */
10951 return type;
10953 /* e) For Intel Xeon Phi native and offload compilation, if the
10954 resulting characteristic data type is an 8-bit or 16-bit
10955 integer type, the characteristic data type is int.
10956 /* Well, we don't handle Xeon Phi yet. */
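/* For illustration: for

     #pragma omp declare simd uniform(n)
     float foo (int n, float x);

   rule a) applies and the characteristic data type is float; for a
   void function whose parameters are all uniform or linear, rule d)
   leaves it at int.  */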
10959 static tree
10960 simd_clone_mangle (struct cgraph_node *node,
10961 struct cgraph_simd_clone *clone_info)
10963 char vecsize_mangle = clone_info->vecsize_mangle;
10964 char mask = clone_info->inbranch ? 'M' : 'N';
10965 unsigned int simdlen = clone_info->simdlen;
10966 unsigned int n;
10967 pretty_printer pp;
10969 gcc_assert (vecsize_mangle && simdlen);
10971 pp_string (&pp, "_ZGV");
10972 pp_character (&pp, vecsize_mangle);
10973 pp_character (&pp, mask);
10974 pp_decimal_int (&pp, simdlen);
10976 for (n = 0; n < clone_info->nargs; ++n)
10978 struct cgraph_simd_clone_arg arg = clone_info->args[n];
10980 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
10981 pp_character (&pp, 'u');
10982 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
10984 gcc_assert (arg.linear_step != 0);
10985 pp_character (&pp, 'l');
10986 if (arg.linear_step > 1)
10987 pp_unsigned_wide_integer (&pp, arg.linear_step);
10988 else if (arg.linear_step < 0)
10990 pp_character (&pp, 'n');
10991 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
10992 arg.linear_step));
10995 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
10997 pp_character (&pp, 's');
10998 pp_unsigned_wide_integer (&pp, arg.linear_step);
11000 else
11001 pp_character (&pp, 'v');
11002 if (arg.alignment)
11004 pp_character (&pp, 'a');
11005 pp_decimal_int (&pp, arg.alignment);
11009 pp_underscore (&pp);
11010 pp_string (&pp,
11011 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
11012 const char *str = pp_formatted_text (&pp);
11014 /* If there already is a SIMD clone with the same mangled name, don't
11015 add another one. This can happen e.g. for
11016 #pragma omp declare simd
11017 #pragma omp declare simd simdlen(8)
11018 int foo (int, int);
11019 if the simdlen is assumed to be 8 for the first one, etc. */
11020 for (struct cgraph_node *clone = node->simd_clones; clone;
11021 clone = clone->simdclone->next_clone)
11022 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
11023 str) == 0)
11024 return NULL_TREE;
11026 return get_identifier (str);
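/* For illustration, assuming an ISA letter of 'b' (one of the x86
   encodings), notinbranch, simdlen 4 and arguments that are uniform,
   linear with step 1, and vector respectively, the printer above
   emits "_ZGVbN4ulv_" followed by the assembler name, e.g.

     _ZGVbN4ulv_foo

   matching the Intel Vector ABI mangling.  */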
11029 /* Create a simd clone of OLD_NODE and return it. */
11031 static struct cgraph_node *
11032 simd_clone_create (struct cgraph_node *old_node)
11034 struct cgraph_node *new_node;
11035 if (old_node->definition)
11037 if (!old_node->has_gimple_body_p ())
11038 return NULL;
11039 old_node->get_body ();
11040 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
11041 false, NULL, NULL,
11042 "simdclone");
11044 else
11046 tree old_decl = old_node->decl;
11047 tree new_decl = copy_node (old_node->decl);
11048 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
11049 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
11050 SET_DECL_RTL (new_decl, NULL);
11051 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
11052 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
11053 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
11054 symtab->call_cgraph_insertion_hooks (new_node);
11056 if (new_node == NULL)
11057 return new_node;
11059 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
11061 /* The function cgraph_function_versioning () will force the new
11062 symbol local. Undo this, and inherit external visibility from
11063 the old node. */
11064 new_node->local.local = old_node->local.local;
11065 new_node->externally_visible = old_node->externally_visible;
11067 return new_node;
11070 /* Adjust the return type of the given function to its appropriate
11071 vector counterpart. Returns a simd array to be used throughout the
11072 function as a return value. */
11074 static tree
11075 simd_clone_adjust_return_type (struct cgraph_node *node)
11077 tree fndecl = node->decl;
11078 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
11079 unsigned int veclen;
11080 tree t;
11082 /* Adjust the function return type. */
11083 if (orig_rettype == void_type_node)
11084 return NULL_TREE;
11085 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
11086 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
11087 || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
11088 veclen = node->simdclone->vecsize_int;
11089 else
11090 veclen = node->simdclone->vecsize_float;
11091 veclen /= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))));
11092 if (veclen > node->simdclone->simdlen)
11093 veclen = node->simdclone->simdlen;
11094 if (veclen == node->simdclone->simdlen)
11095 TREE_TYPE (TREE_TYPE (fndecl))
11096 = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)),
11097 node->simdclone->simdlen);
11098 else
11100 t = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)), veclen);
11101 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
11102 TREE_TYPE (TREE_TYPE (fndecl)) = t;
11104 if (!node->definition)
11105 return NULL_TREE;
11107 t = DECL_RESULT (fndecl);
11108 /* Adjust the DECL_RESULT. */
11109 gcc_assert (TREE_TYPE (t) != void_type_node);
11110 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
11111 relayout_decl (t);
11113 tree atype = build_array_type_nelts (orig_rettype,
11114 node->simdclone->simdlen);
11115 if (veclen != node->simdclone->simdlen)
11116 return build1 (VIEW_CONVERT_EXPR, atype, t);
11118 /* Set up a SIMD array to use as the return value. */
11119 tree retval = create_tmp_var_raw (atype, "retval");
11120 gimple_add_tmp_var (retval);
11121 return retval;
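/* For illustration, under assumed target parameters: with simdlen 8,
   a float return value and a vecsize_float of 256 bits, veclen is
   256/32 == 8 == simdlen, so the return type becomes vector(8) float.
   With a vecsize_float of 128 bits, veclen is 4 and the return type
   is instead a 2-element array of vector(4) float, with the
   DECL_RESULT viewed back as an 8-element float array via
   VIEW_CONVERT_EXPR.  */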
11124 /* Each vector argument has a corresponding array to be used locally
11125 as part of the eventual loop. Create such a temporary array and
11126 return it.
11128 PREFIX is the prefix to be used for the temporary.
11130 TYPE is the inner element type.
11132 SIMDLEN is the number of elements. */
11134 static tree
11135 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
11137 tree atype = build_array_type_nelts (type, simdlen);
11138 tree avar = create_tmp_var_raw (atype, prefix);
11139 gimple_add_tmp_var (avar);
11140 return avar;
11143 /* Modify the function argument types to their corresponding vector
11144 counterparts if appropriate. Also, create one array for each simd
11145 argument to be used locally when using the function arguments as
11146 part of the loop.
11148 NODE is the function whose arguments are to be adjusted.
11150 Returns an adjustment vector describing how the argument types
11151 will be adjusted.
11153 static ipa_parm_adjustment_vec
11154 simd_clone_adjust_argument_types (struct cgraph_node *node)
11156 vec<tree> args;
11157 ipa_parm_adjustment_vec adjustments;
11159 if (node->definition)
11160 args = ipa_get_vector_of_formal_parms (node->decl);
11161 else
11162 args = simd_clone_vector_of_formal_parm_types (node->decl);
11163 adjustments.create (args.length ());
11164 unsigned i, j, veclen;
11165 struct ipa_parm_adjustment adj;
11166 for (i = 0; i < node->simdclone->nargs; ++i)
11168 memset (&adj, 0, sizeof (adj));
11169 tree parm = args[i];
11170 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
11171 adj.base_index = i;
11172 adj.base = parm;
11174 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
11175 node->simdclone->args[i].orig_type = parm_type;
11177 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
11179 /* No adjustment necessary for scalar arguments. */
11180 adj.op = IPA_PARM_OP_COPY;
11182 else
11184 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
11185 veclen = node->simdclone->vecsize_int;
11186 else
11187 veclen = node->simdclone->vecsize_float;
11188 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
11189 if (veclen > node->simdclone->simdlen)
11190 veclen = node->simdclone->simdlen;
11191 adj.arg_prefix = "simd";
11192 adj.type = build_vector_type (parm_type, veclen);
11193 node->simdclone->args[i].vector_type = adj.type;
11194 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11196 adjustments.safe_push (adj);
11197 if (j == veclen)
11199 memset (&adj, 0, sizeof (adj));
11200 adj.op = IPA_PARM_OP_NEW;
11201 adj.arg_prefix = "simd";
11202 adj.base_index = i;
11203 adj.type = node->simdclone->args[i].vector_type;
11207 if (node->definition)
11208 node->simdclone->args[i].simd_array
11209 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
11210 parm_type, node->simdclone->simdlen);
11212 adjustments.safe_push (adj);
11215 if (node->simdclone->inbranch)
11217 tree base_type
11218 = simd_clone_compute_base_data_type (node->simdclone->origin,
11219 node->simdclone);
11221 memset (&adj, 0, sizeof (adj));
11222 adj.op = IPA_PARM_OP_NEW;
11223 adj.arg_prefix = "mask";
11225 adj.base_index = i;
11226 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
11227 veclen = node->simdclone->vecsize_int;
11228 else
11229 veclen = node->simdclone->vecsize_float;
11230 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
11231 if (veclen > node->simdclone->simdlen)
11232 veclen = node->simdclone->simdlen;
11233 adj.type = build_vector_type (base_type, veclen);
11234 adjustments.safe_push (adj);
11236 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11237 adjustments.safe_push (adj);
11239 /* We have previously allocated one extra entry for the mask. Use
11240 it and fill it. */
11241 struct cgraph_simd_clone *sc = node->simdclone;
11242 sc->nargs++;
11243 if (node->definition)
11245 sc->args[i].orig_arg
11246 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
11247 sc->args[i].simd_array
11248 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
11250 sc->args[i].orig_type = base_type;
11251 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
11254 if (node->definition)
11255 ipa_modify_formal_parameters (node->decl, adjustments);
11256 else
11258 tree new_arg_types = NULL_TREE, new_reversed;
11259 bool last_parm_void = false;
11260 if (args.length () > 0 && args.last () == void_type_node)
11261 last_parm_void = true;
11263 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
11264 j = adjustments.length ();
11265 for (i = 0; i < j; i++)
11267 struct ipa_parm_adjustment *adj = &adjustments[i];
11268 tree ptype;
11269 if (adj->op == IPA_PARM_OP_COPY)
11270 ptype = args[adj->base_index];
11271 else
11272 ptype = adj->type;
11273 new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
11275 new_reversed = nreverse (new_arg_types);
11276 if (last_parm_void)
11278 if (new_reversed)
11279 TREE_CHAIN (new_arg_types) = void_list_node;
11280 else
11281 new_reversed = void_list_node;
11284 tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
11285 TYPE_ARG_TYPES (new_type) = new_reversed;
11286 TREE_TYPE (node->decl) = new_type;
11288 adjustments.release ();
11290 args.release ();
11291 return adjustments;
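
/* For example (hypothetical declaration, for illustration only): for

       #pragma omp declare simd simdlen(8) uniform(u)
       double foo (double x, int u);

   with 256-bit floating point vectors, veclen for "x" is 256/64 == 4,
   so the clone ends up taking two vector(4) double parameters to cover
   the 8 lanes of "x" (the extra one pushed as IPA_PARM_OP_NEW above),
   the uniform "u" is copied through unchanged, and for a definition a
   local "double x[8]" simd array is created for use in the loop.  */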

/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.  */

static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_parm_adjustment_vec adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      if (adjustments[j].op == IPA_PARM_OP_COPY)
	continue;

      node->simdclone->args[i].vector_arg = arg;

      tree array = node->simdclone->args[i].simd_array;
      if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
	{
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      elemsize
		= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}
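
/* Continuing the example above: if "x" arrives as two vector(4)
   double parameters x.0 and x.1, the generated initialization is
   roughly

       *(vector(4) double *) &x[0] = x.0;
       *(vector(4) double *) &x[4] = x.1;

   i.e. one MEM_REF store per incoming vector, at byte offset
   k * elemsize into the simd array (here 4 * 8 == 32 for the second
   store).  */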

/* Callback info for ipa_simd_modify_stmt_ops below.  */

struct modify_stmt_info {
  ipa_parm_adjustment_vec adjustments;
  gimple stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};

/* Callback for walk_gimple_op.

   Adjust operands from a given statement as specified in the
   adjustments vector in the callback data.  */

static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  struct ipa_parm_adjustment *cand = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
  else
    {
      if (TYPE_P (*tp))
	*walk_subtrees = 0;
    }

  tree repl = NULL_TREE;
  if (cand)
    repl = unshare_expr (cand->new_decl);
  else
    {
      if (tp != orig_tp)
	{
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }

  if (tp != orig_tp)
    {
      repl = build_fold_addr_expr (repl);
      gimple stmt
	= gimple_build_assign (make_ssa_name (TREE_TYPE (repl), NULL), repl);
      repl = gimple_assign_lhs (stmt);
      gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;

  info->modified = true;
  return NULL_TREE;
}
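
/* For example, a use of parameter "x" in a statement is replaced with
   the ARRAY_REF x[iter] prepared as cand->new_decl by
   ipa_simd_modify_function_body below; for a use of "&x" the address
   of the array element is first computed into a fresh SSA name by a
   new assignment inserted before the statement, since the ADDR_EXPR
   may not be valid directly in the operand's position.  */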

/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.

   This function will replace all function argument uses with their
   corresponding simd array elements, and adjust the return values
   accordingly.  */

static void
ipa_simd_modify_function_body (struct cgraph_node *node,
			       ipa_parm_adjustment_vec adjustments,
			       tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j, l;

  /* Re-use the adjustments array, but this time use it to replace
     every function argument use to an offset into the corresponding
     simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg)
	continue;

      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      adjustments[j].new_decl
	= build4 (ARRAY_REF,
		  basetype,
		  node->simdclone->args[i].simd_array,
		  iter,
		  NULL_TREE, NULL_TREE);
      if (adjustments[j].op == IPA_PARM_OP_NONE
	  && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
	j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
    }

  l = adjustments.length ();
  for (i = 1; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (name
	  && SSA_NAME_VAR (name)
	  && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
	{
	  for (j = 0; j < l; j++)
	    if (SSA_NAME_VAR (name) == adjustments[j].base
		&& adjustments[j].new_decl)
	      {
		tree base_var;
		if (adjustments[j].new_ssa_base == NULL_TREE)
		  {
		    base_var
		      = copy_var_decl (adjustments[j].base,
				       DECL_NAME (adjustments[j].base),
				       TREE_TYPE (adjustments[j].base));
		    adjustments[j].new_ssa_base = base_var;
		  }
		else
		  base_var = adjustments[j].new_ssa_base;
		if (SSA_NAME_IS_DEFAULT_DEF (name))
		  {
		    bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
		    gimple_stmt_iterator gsi = gsi_after_labels (bb);
		    tree new_decl = unshare_expr (adjustments[j].new_decl);
		    set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
		    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
		    SSA_NAME_IS_DEFAULT_DEF (name) = 0;
		    gimple stmt = gimple_build_assign (name, new_decl);
		    gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		  }
		else
		  SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	      }
	}
    }

  struct modify_stmt_info info;
  info.adjustments = adjustments;

  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;

      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  info.stmt = stmt;
	  struct walk_stmt_info wi;

	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);

	  if (gimple_code (stmt) == GIMPLE_RETURN)
	    {
	      tree retval = gimple_return_retval (stmt);
	      if (!retval)
		{
		  gsi_remove (&gsi, true);
		  continue;
		}

	      /* Replace `return foo' with `retval_array[iter] = foo'.  */
	      tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
				 retval_array, iter, NULL, NULL);
	      stmt = gimple_build_assign (ref, retval);
	      gsi_replace (&gsi, stmt, true);
	      info.modified = true;
	    }

	  if (info.modified)
	    {
	      update_stmt (stmt);
	      if (maybe_clean_eh_stmt (stmt))
		gimple_purge_dead_eh_edges (gimple_bb (stmt));
	    }
	  gsi_next (&gsi);
	}
    }
}
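
/* Taken together, a scalar body such as "return x + 1;" ends up
   roughly as

       retval_array[iter] = x[iter] + 1;

   (names illustrative), with the surrounding simdlen iteration added
   by simd_clone_adjust below.  */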

/* Adjust the argument types in NODE to their appropriate vector
   counterparts.  */

static void
simd_clone_adjust (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  targetm.simd_clone.adjust (node);

  tree retval = simd_clone_adjust_return_type (node);
  ipa_parm_adjustment_vec adjustments
    = simd_clone_adjust_argument_types (node);

  push_gimplify_context ();

  gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);

  /* Adjust all uses of vector arguments accordingly.  Adjust all
     return values accordingly.  */
  tree iter = create_tmp_var (unsigned_type_node, "iter");
  tree iter1 = make_ssa_name (iter, NULL);
  tree iter2 = make_ssa_name (iter, NULL);
  ipa_simd_modify_function_body (node, adjustments, retval, iter1);

  /* Initialize the iteration variable.  */
  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  basic_block body_bb = split_block_after_labels (entry_bb)->dest;
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  /* Insert the SIMD array and iv initialization at function
     entry.  */
  gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);

  pop_gimplify_context (NULL);

  /* Create a new BB right before the original exit BB, to hold the
     iteration increment and the condition/branch.  */
  basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
  basic_block incr_bb = create_empty_bb (orig_exit);
  /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with no
     flags set.  Set it now to be a FALLTHRU_EDGE.  */
  gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
  EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
  for (unsigned i = 0;
       i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
    {
      edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
      redirect_edge_succ (e, incr_bb);
    }
  edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
  e->probability = REG_BR_PROB_BASE;
  gsi = gsi_last_bb (incr_bb);
  gimple g = gimple_build_assign_with_ops (PLUS_EXPR, iter2, iter1,
					   build_int_cst (unsigned_type_node,
							  1));
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Mostly annotate the loop for the vectorizer (the rest is done
     below).  */
  struct loop *loop = alloc_loop ();
  cfun->has_force_vectorize_loops = true;
  loop->safelen = node->simdclone->simdlen;
  loop->force_vectorize = true;
  loop->header = body_bb;
  add_bb_to_loop (incr_bb, loop);
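
  /* At this point the clone is being given roughly the shape

	 entry_bb: simd arrays and iv initialized
	 body_bb:  rewritten original body, x[iter1] etc.
	 incr_bb:  iter2 = iter1 + 1;
		   if (iter2 < simdlen) goto body_bb; else exit

     (illustrative sketch; the condition and latch blocks are built
     below).  safelen and force_vectorize mark the loop so the loop
     vectorizer can later turn it back into vector code.  */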

  /* Branch around the body if the mask applies.  */
  if (node->simdclone->inbranch)
    {
      gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
      tree mask_array
	= node->simdclone->args[node->simdclone->nargs - 1].simd_array;
      tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)), NULL);
      tree aref = build4 (ARRAY_REF,
			  TREE_TYPE (TREE_TYPE (mask_array)),
			  mask_array, iter1,
			  NULL, NULL);
      g = gimple_build_assign (mask, aref);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
      if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
	{
	  aref = build1 (VIEW_CONVERT_EXPR,
			 build_nonstandard_integer_type (bitsize, 0), mask);
	  mask = make_ssa_name (TREE_TYPE (aref), NULL);
	  g = gimple_build_assign (mask, aref);
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	}

      g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
			     NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
      FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
    }
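
  /* E.g. for an inbranch clone the loop header now ends in roughly

	 mask.N = mask_array[iter1];
	 if (mask.N == 0) goto incr_bb;  /@ lane is masked off @/

     with an intermediate VIEW_CONVERT_EXPR to a same-sized integer
     type when the mask element type is not integral.  */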

  /* Generate the condition.  */
  g = gimple_build_cond (LT_EXPR,
			 iter2,
			 build_int_cst (unsigned_type_node,
					node->simdclone->simdlen),
			 NULL, NULL);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
  e = split_block (incr_bb, gsi_stmt (gsi));
  basic_block latch_bb = e->dest;
  basic_block new_exit_bb = e->dest;
  new_exit_bb = split_block (latch_bb, NULL)->dest;
  loop->latch = latch_bb;

  redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);

  make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
  /* The successor of incr_bb is already pointing to latch_bb; just
     change the flags.
     make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE);  */
  FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;

  gimple phi = create_phi_node (iter1, body_bb);
  edge preheader_edge = find_edge (entry_bb, body_bb);
  edge latch_edge = single_succ_edge (latch_bb);
  add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
	       UNKNOWN_LOCATION);
  add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);

  /* Generate the new return.  */
  gsi = gsi_last_bb (new_exit_bb);
  if (retval
      && TREE_CODE (retval) == VIEW_CONVERT_EXPR
      && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
    retval = TREE_OPERAND (retval, 0);
  else if (retval)
    {
      retval = build1 (VIEW_CONVERT_EXPR,
		       TREE_TYPE (TREE_TYPE (node->decl)),
		       retval);
      retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
					 false, GSI_CONTINUE_LINKING);
    }
  g = gimple_build_return (retval);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Handle aligned clauses by replacing default defs of the aligned
     uniform args with __builtin_assume_aligned (arg_N(D), alignment)
     lhs.  Handle linear by adding PHIs.  */
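
  /* E.g. (hypothetical clause) for "aligned(p:32)" on a uniform
     pointer argument "p", the rewrite below amounts to inserting

	 p.N = __builtin_assume_aligned (p, 32);

     on the entry edge and redirecting every non-debug use of the old
     default def to p.N, with an extra NOP_EXPR conversion when the
     argument type is not exactly ptr_type_node.  */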
  for (unsigned i = 0; i < node->simdclone->nargs; i++)
    if (node->simdclone->args[i].alignment
	&& node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	&& (node->simdclone->args[i].alignment
	    & (node->simdclone->args[i].alignment - 1)) == 0
	&& TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
	   == POINTER_TYPE)
      {
	unsigned int alignment = node->simdclone->args[i].alignment;
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	if (def && !has_zero_uses (def))
	  {
	    tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
	    gimple_seq seq = NULL;
	    bool need_cvt = false;
	    gimple call
	      = gimple_build_call (fn, 2, def, size_int (alignment));
	    g = call;
	    if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
					    ptr_type_node))
	      need_cvt = true;
	    tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg, NULL);
	    gimple_call_set_lhs (g, t);
	    gimple_seq_add_stmt_without_update (&seq, g);
	    if (need_cvt)
	      {
		t = make_ssa_name (orig_arg, NULL);
		g = gimple_build_assign_with_ops (NOP_EXPR, t,
						  gimple_call_lhs (g),
						  NULL_TREE);
		gimple_seq_add_stmt_without_update (&seq, g);
	      }
	    gsi_insert_seq_on_edge_immediate
	      (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);

	    entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	    int freq = compute_call_stmt_bb_frequency (current_function_decl,
						       entry_bb);
	    node->create_edge (cgraph_node::get_create (fn),
			       call, entry_bb->count, freq);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    tree repl = gimple_get_lhs (g);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (is_gimple_debug (use_stmt) || use_stmt == call)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, repl);
	  }
      }
    else if (node->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
      {
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
		    || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
	if (def && !has_zero_uses (def))
	  {
	    iter1 = make_ssa_name (orig_arg, NULL);
	    iter2 = make_ssa_name (orig_arg, NULL);
	    phi = create_phi_node (iter1, body_bb);
	    add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
	    add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
	    enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
				  ? PLUS_EXPR : POINTER_PLUS_EXPR;
	    tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
			   ? TREE_TYPE (orig_arg) : sizetype;
	    tree addcst
	      = build_int_cst (addtype, node->simdclone->args[i].linear_step);
	    g = gimple_build_assign_with_ops (code, iter2, iter1, addcst);
	    gsi = gsi_last_bb (incr_bb);
	    gsi_insert_before (&gsi, g, GSI_SAME_STMT);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (use_stmt == phi)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, iter1);
	  }
      }
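
  /* E.g. (hypothetical clause) "linear(i:1)" on an integer argument
     gives each lane the value i + iter, implemented by the new PHI

	 i.1 = PHI <i(D) (preheader), i.2 (latch)>
	 ...
	 i.2 = i.1 + 1;  /@ in incr_bb; POINTER_PLUS_EXPR for pointers @/

     with all other uses of the incoming default def redirected to
     i.1.  */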

  calculate_dominance_info (CDI_DOMINATORS);
  add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);

  pop_cfun ();
}

/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  */

static void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl));
  if (attr == NULL_TREE
      || node->global.inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;

  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, where we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
	= simd_clone_clauses_extract (node, TREE_VALUE (attr),
				      &inbranch_clause_specified);
      if (clone_info == NULL)
	continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
	 1 (just one ISA of simd clones should be created) or a higher
	 count of ISA variants.  In the latter case, clone_info is
	 initialized for the first ISA variant.  */
      int count
	= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
							  base_type, 0);
      if (count == 0)
	continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
	 also create one inbranch and one !inbranch clone of it.  */
      for (int i = 0; i < count * 2; i++)
	{
	  struct cgraph_simd_clone *clone = clone_info;
	  if (inbranch_clause_specified && (i & 1) != 0)
	    continue;

	  if (i != 0)
	    {
	      clone = simd_clone_struct_alloc (clone_info->nargs
					       + ((i & 1) != 0));
	      simd_clone_struct_copy (clone, clone_info);
	      /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
		 and simd_clone_adjust_argument_types did to the first
		 clone's info.  */
	      clone->nargs -= clone_info->inbranch;
	      clone->simdlen = orig_simdlen;
	      /* And call the target hook again to get the right ISA.  */
	      targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
							      base_type,
							      i / 2);
	      if ((i & 1) != 0)
		clone->inbranch = 1;
	    }

	  /* simd_clone_mangle might fail if such a clone has been created
	     already.  */
	  tree id = simd_clone_mangle (node, clone);
	  if (id == NULL_TREE)
	    continue;

	  /* Only when we are sure we want to create the clone do we
	     actually clone the function (for definitions) or create
	     another extern FUNCTION_DECL (for prototypes without
	     definitions).  */
	  struct cgraph_node *n = simd_clone_create (node);
	  if (n == NULL)
	    continue;

	  n->simdclone = clone;
	  clone->origin = node;
	  clone->next_clone = NULL;
	  if (node->simd_clones == NULL)
	    {
	      clone->prev_clone = n;
	      node->simd_clones = n;
	    }
	  else
	    {
	      clone->prev_clone = node->simd_clones->simdclone->prev_clone;
	      clone->prev_clone->simdclone->next_clone = n;
	      node->simd_clones->simdclone->prev_clone = n;
	    }
	  symtab->change_decl_assembler_name (n->decl, id);
	  /* And finally adjust the return type, parameters and for
	     definitions also the function body.  */
	  if (node->definition)
	    simd_clone_adjust (n);
	  else
	    {
	      simd_clone_adjust_return_type (n);
	      simd_clone_adjust_argument_types (n);
	    }
	}
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}
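
/* For instance, a definition like

       #pragma omp declare simd
       int foo (int x);

   typically produces both inbranch and notinbranch clones for each
   ISA variant the target requests, registered under mangled names in
   the vector function ABI style (e.g. something like _ZGVbN4v_foo on
   x86); the exact names come from simd_clone_mangle and the target
   hook above.  */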

/* Entry point for IPA simd clone creation pass.  */

static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}

namespace {

const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS,		/* type */
  "simdclone",			/* name */
  OPTGROUP_NONE,		/* optinfo_flags */
  TV_NONE,			/* tv_id */
  ( PROP_ssa | PROP_cfg ),	/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0,				/* todo_flags_finish */
};

class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

bool
pass_omp_simd_clone::gate (function *)
{
  return ((flag_openmp || flag_openmp_simd
	   || flag_cilkplus
	   || (in_lto_p && !flag_wpa))
	  && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
}

} // anon namespace

simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}

#include "gt-omp-low.h"