Concretize gimple_cond_set_{lhs|rhs} (official-gcc.git: gcc/omp-low.c)
/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "rtl.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "flags.h"
#include "function.h"
#include "tree-pass.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
#include "cilk.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};
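
/* Illustrative note (not from the original sources): for

     #pragma omp parallel
       #pragma omp for
         ...

   the GIMPLE_OMP_PARALLEL region has the GIMPLE_OMP_FOR region on its
   INNER list, each region running from its ENTRY block to its EXIT
   block, with peers chained through NEXT.  */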
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump to during omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple_omp_for for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
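
/* WALK_SUBSTMTS is spliced into the switch statement of
   walk_gimple_stmt callbacks (see find_combined_for below) so that
   container statements are walked into instead of being treated as
   already handled.  */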
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
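
/* Typical use, as in determine_parallel_type below:

     tree c = find_omp_clause (gimple_omp_for_clauses (ws_stmt),
                               OMP_CLAUSE_SCHEDULE);

   yields the first OMP_CLAUSE_SCHEDULE clause, or NULL_TREE.  */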
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple_omp_for for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD
		      || (gimple_omp_for_kind (for_stmt)
			  == GF_OMP_FOR_KIND_CILKFOR));
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
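	      /* The folds below compute
		 (step + (cond_code == LT_EXPR ? -1 : 1) + n2 - n1) / step;
		 e.g. for (i = 0; i < 10; i += 3) this is
		 (3 + -1 + 10 - 0) / 3 = 4 iterations.  (Worked example
		 added for illustration.)  */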
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (as_a <gimple_omp_for> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_omp_for for_stmt = dyn_cast <gimple_omp_for> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
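
/* In short, use_pointer_for_field above answers "pass by pointer?":
   yes for aggregates, for decls whose location may be visible outside
   the region (statics, value-exprs, addressable vars), and for task
   contexts, where the outer decl is also marked addressable; no for
   read-only scalars and by-reference PARM/RESULT decls.  (Summary
   comment; the function above is authoritative.)  */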
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is a reference, it
       is possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
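/* MASK is a bit mask: bit 1 requests a field in the receiver record
   (record_type/field_map), bit 2 one in the sender record
   (srecord_type/sfield_map), and bit 4 a pointer-to-pointer field for
   array-typed decls.  (Summary inferred from the asserts and uses
   below.)  */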
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at basic block BB inside
   region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}
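
/* Note that in new_omp_context above a nested context inherits the
   outer context's copy_body_data wholesale (src_fn, dst_fn, etc.),
   while every context gets a freshly allocated decl_map.  */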
static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple_omp_task task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple_bind bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node::add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gimple_omp_task> (ctx->stmt));

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  if (ctx->outer)
		    {
		      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		      decl = OMP_CLAUSE_DECL (c);
		    }
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
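  /* Second pass: with all fields and local copies installed, fix up
     remapped decls, and record which clauses carry gimple sequences
     that still need scanning (handled after this loop).  */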
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for the omp child function.  Returns an identifier.
   If IS_CILK_FOR is true then the suffix for the child function is
   "_cilk_for_fn".  */

static tree
create_omp_child_function_name (bool task_copy, bool is_cilk_for)
{
  if (is_cilk_for)
    return clone_function_name (current_function_decl, "_cilk_for_fn");
  return clone_function_name (current_function_decl,
			      task_copy ? "_omp_cpyfn" : "_omp_fn");
}
/* Returns the type of the induction variable for the child function for
   _Cilk_for and the types for _high and _low variables based on TYPE.  */

static tree
cilk_for_check_loop_diff_type (tree type)
{
  if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
    {
      if (TYPE_UNSIGNED (type))
	return uint32_type_node;
      else
	return integer_type_node;
    }
  else
    {
      if (TYPE_UNSIGNED (type))
	return uint64_type_node;
      else
	return long_long_integer_type_node;
    }
}
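
/* For example, a 16-bit signed induction variable yields
   integer_type_node and an unsigned 64-bit one yields uint64_type_node;
   the chosen type must be wide enough to hold the loop bound
   difference.  */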
1881 /* Build a decl for the omp child function. It'll not contain a body
1882 yet, just the bare decl. */
1884 static void
1885 create_omp_child_function (omp_context *ctx, bool task_copy)
1887 tree decl, type, name, t;
1889 tree cilk_for_count
1890 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
1891 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1892 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
1893 tree cilk_var_type = NULL_TREE;
1895 name = create_omp_child_function_name (task_copy,
1896 cilk_for_count != NULL_TREE);
1897 if (task_copy)
1898 type = build_function_type_list (void_type_node, ptr_type_node,
1899 ptr_type_node, NULL_TREE);
1900 else if (cilk_for_count)
1902 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
1903 cilk_var_type = cilk_for_check_loop_diff_type (type);
1904 type = build_function_type_list (void_type_node, ptr_type_node,
1905 cilk_var_type, cilk_var_type, NULL_TREE);
1907 else
1908 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1910 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
1912 if (!task_copy)
1913 ctx->cb.dst_fn = decl;
1914 else
1915 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1917 TREE_STATIC (decl) = 1;
1918 TREE_USED (decl) = 1;
1919 DECL_ARTIFICIAL (decl) = 1;
1920 DECL_IGNORED_P (decl) = 0;
1921 TREE_PUBLIC (decl) = 0;
1922 DECL_UNINLINABLE (decl) = 1;
1923 DECL_EXTERNAL (decl) = 0;
1924 DECL_CONTEXT (decl) = NULL_TREE;
1925 DECL_INITIAL (decl) = make_node (BLOCK);
1926 bool target_p = false;
1927 if (lookup_attribute ("omp declare target",
1928 DECL_ATTRIBUTES (current_function_decl)))
1929 target_p = true;
1930 else
1932 omp_context *octx;
1933 for (octx = ctx; octx; octx = octx->outer)
1934 if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
1935 && gimple_omp_target_kind (octx->stmt)
1936 == GF_OMP_TARGET_KIND_REGION)
1938 target_p = true;
1939 break;
1942 if (target_p)
1943 DECL_ATTRIBUTES (decl)
1944 = tree_cons (get_identifier ("omp declare target"),
1945 NULL_TREE, DECL_ATTRIBUTES (decl));
1947 t = build_decl (DECL_SOURCE_LOCATION (decl),
1948 RESULT_DECL, NULL_TREE, void_type_node);
1949 DECL_ARTIFICIAL (t) = 1;
1950 DECL_IGNORED_P (t) = 1;
1951 DECL_CONTEXT (t) = decl;
1952 DECL_RESULT (decl) = t;
1954 /* _Cilk_for's child function requires two extra parameters called
1955 __low and __high that are set by the Cilk runtime when it calls this
1956 function. */
1957 if (cilk_for_count)
1959 t = build_decl (DECL_SOURCE_LOCATION (decl),
1960 PARM_DECL, get_identifier ("__high"), cilk_var_type);
1961 DECL_ARTIFICIAL (t) = 1;
1962 DECL_NAMELESS (t) = 1;
1963 DECL_ARG_TYPE (t) = ptr_type_node;
1964 DECL_CONTEXT (t) = current_function_decl;
1965 TREE_USED (t) = 1;
1966 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1967 DECL_ARGUMENTS (decl) = t;
1969 t = build_decl (DECL_SOURCE_LOCATION (decl),
1970 PARM_DECL, get_identifier ("__low"), cilk_var_type);
1971 DECL_ARTIFICIAL (t) = 1;
1972 DECL_NAMELESS (t) = 1;
1973 DECL_ARG_TYPE (t) = ptr_type_node;
1974 DECL_CONTEXT (t) = current_function_decl;
1975 TREE_USED (t) = 1;
1976 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1977 DECL_ARGUMENTS (decl) = t;
1980 tree data_name = get_identifier (".omp_data_i");
1981 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
1982 ptr_type_node);
1983 DECL_ARTIFICIAL (t) = 1;
1984 DECL_NAMELESS (t) = 1;
1985 DECL_ARG_TYPE (t) = ptr_type_node;
1986 DECL_CONTEXT (t) = current_function_decl;
1987 TREE_USED (t) = 1;
1988 if (cilk_for_count)
1989 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1990 DECL_ARGUMENTS (decl) = t;
1991 if (!task_copy)
1992 ctx->receiver_decl = t;
1993 else
1995 t = build_decl (DECL_SOURCE_LOCATION (decl),
1996 PARM_DECL, get_identifier (".omp_data_o"),
1997 ptr_type_node);
1998 DECL_ARTIFICIAL (t) = 1;
1999 DECL_NAMELESS (t) = 1;
2000 DECL_ARG_TYPE (t) = ptr_type_node;
2001 DECL_CONTEXT (t) = current_function_decl;
2002 TREE_USED (t) = 1;
2003 TREE_ADDRESSABLE (t) = 1;
2004 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2005 DECL_ARGUMENTS (decl) = t;
2008 /* Allocate memory for the function structure. The call to
2009 push_struct_function clobbers CFUN, so we need to restore
2010 it afterward with pop_cfun. */
2011 push_struct_function (decl);
2012 cfun->function_end_locus = gimple_location (ctx->stmt);
2013 pop_cfun ();
2016 /* Callback for walk_gimple_seq. Check whether a combined parallel
2017 contains an OMP_FOR with gimple_omp_for_combined_into_p set. */
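/* E.g. for a combined "#pragma omp parallel for" the parallel body
   holds a GIMPLE_OMP_FOR with gimple_omp_for_combined_into_p set; the
   callback stashes it in WI->INFO and returns non-NULL to stop the
   walk.  */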
2019 static tree
2020 find_combined_for (gimple_stmt_iterator *gsi_p,
2021 bool *handled_ops_p,
2022 struct walk_stmt_info *wi)
2024 gimple stmt = gsi_stmt (*gsi_p);
2026 *handled_ops_p = true;
2027 switch (gimple_code (stmt))
2029 WALK_SUBSTMTS;
2031 case GIMPLE_OMP_FOR:
2032 if (gimple_omp_for_combined_into_p (stmt)
2033 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2035 wi->info = stmt;
2036 return integer_zero_node;
2038 break;
2039 default:
2040 break;
2042 return NULL;
2045 /* Scan an OpenMP parallel directive. */
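/* Illustrative: for a combined construct such as

     #pragma omp parallel for
     for (i = a; i < b; i++) ...

   OMP_CLAUSE__LOOPTEMP_ temporaries (roughly, istart0/iend0) are added
   to the parallel below so that the inner GIMPLE_OMP_FOR can receive
   its iteration bounds from the enclosing parallel when expanded.  */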
2047 static void
2048 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2050 omp_context *ctx;
2051 tree name;
2052 gimple_omp_parallel stmt = as_a <gimple_omp_parallel> (gsi_stmt (*gsi));
2054 /* Ignore parallel directives with empty bodies, unless there
2055 are copyin clauses. */
2056 if (optimize > 0
2057 && empty_body_p (gimple_omp_body (stmt))
2058 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2059 OMP_CLAUSE_COPYIN) == NULL)
2061 gsi_replace (gsi, gimple_build_nop (), false);
2062 return;
2065 if (gimple_omp_parallel_combined_p (stmt))
2067 struct walk_stmt_info wi;
2069 memset (&wi, 0, sizeof (wi));
2070 wi.val_only = true;
2071 walk_gimple_seq (gimple_omp_body (stmt),
2072 find_combined_for, NULL, &wi);
2073 if (wi.info)
2075 gimple_omp_for for_stmt = as_a <gimple_omp_for> ((gimple) wi.info);
2076 struct omp_for_data fd;
2077 extract_omp_for_data (for_stmt, &fd, NULL);
2078 /* We need two temporaries with fd.loop.v type (istart/iend) and,
2079 if the iteration count is not constant, (fd.collapse - 1) more
2080 temporaries of the same type for the count2 ... countN-1 vars. */
2081 size_t count = 2, i;
2082 tree type = fd.iter_type;
2083 if (fd.collapse > 1
2084 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2085 count += fd.collapse - 1;
2086 for (i = 0; i < count; i++)
2088 tree temp = create_tmp_var (type, NULL);
2089 tree c = build_omp_clause (UNKNOWN_LOCATION,
2090 OMP_CLAUSE__LOOPTEMP_);
2091 insert_decl_map (&outer_ctx->cb, temp, temp);
2092 OMP_CLAUSE_DECL (c) = temp;
2093 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2094 gimple_omp_parallel_set_clauses (stmt, c);
2099 ctx = new_omp_context (stmt, outer_ctx);
2100 taskreg_contexts.safe_push (ctx);
2101 if (taskreg_nesting_level > 1)
2102 ctx->is_nested = true;
2103 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2104 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2105 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2106 name = create_tmp_var_name (".omp_data_s");
2107 name = build_decl (gimple_location (stmt),
2108 TYPE_DECL, name, ctx->record_type);
2109 DECL_ARTIFICIAL (name) = 1;
2110 DECL_NAMELESS (name) = 1;
2111 TYPE_NAME (ctx->record_type) = name;
2112 create_omp_child_function (ctx, false);
2113 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2115 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2116 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2118 if (TYPE_FIELDS (ctx->record_type) == NULL)
2119 ctx->record_type = ctx->receiver_decl = NULL;
2122 /* Scan an OpenMP task directive. */
2124 static void
2125 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2127 omp_context *ctx;
2128 tree name, t;
2129 gimple_omp_task stmt = as_a <gimple_omp_task> (gsi_stmt (*gsi));
2131 /* Ignore task directives with empty bodies. */
2132 if (optimize > 0
2133 && empty_body_p (gimple_omp_body (stmt)))
2135 gsi_replace (gsi, gimple_build_nop (), false);
2136 return;
2139 ctx = new_omp_context (stmt, outer_ctx);
2140 taskreg_contexts.safe_push (ctx);
2141 if (taskreg_nesting_level > 1)
2142 ctx->is_nested = true;
2143 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2144 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2145 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2146 name = create_tmp_var_name (".omp_data_s");
2147 name = build_decl (gimple_location (stmt),
2148 TYPE_DECL, name, ctx->record_type);
2149 DECL_ARTIFICIAL (name) = 1;
2150 DECL_NAMELESS (name) = 1;
2151 TYPE_NAME (ctx->record_type) = name;
2152 create_omp_child_function (ctx, false);
2153 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2155 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2157 if (ctx->srecord_type)
2159 name = create_tmp_var_name (".omp_data_a");
2160 name = build_decl (gimple_location (stmt),
2161 TYPE_DECL, name, ctx->srecord_type);
2162 DECL_ARTIFICIAL (name) = 1;
2163 DECL_NAMELESS (name) = 1;
2164 TYPE_NAME (ctx->srecord_type) = name;
2165 create_omp_child_function (ctx, true);
2168 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2170 if (TYPE_FIELDS (ctx->record_type) == NULL)
2172 ctx->record_type = ctx->receiver_decl = NULL;
2173 t = build_int_cst (long_integer_type_node, 0);
2174 gimple_omp_task_set_arg_size (stmt, t);
2175 t = build_int_cst (long_integer_type_node, 1);
2176 gimple_omp_task_set_arg_align (stmt, t);
2181 /* If any decls have been made addressable during scan_omp,
2182 adjust their fields if needed, and lay out the record types
2183 of parallel/task constructs. */
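/* E.g. if a shared int i became addressable while scanning the body
   and use_pointer_for_field therefore now returns true, the field for
   i in .omp_data_s is rewritten below from int to int *.  */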
2185 static void
2186 finish_taskreg_scan (omp_context *ctx)
2188 if (ctx->record_type == NULL_TREE)
2189 return;
2191 /* If any task_shared_vars were needed, check whether that changed
2192 use_pointer_for_field for any OMP_CLAUSE_SHARED clause on the
2193 GIMPLE_OMP_{PARALLEL,TASK} statement. If it did, update the
2194 field types now. */
2195 if (task_shared_vars)
2197 tree c;
2199 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2200 c; c = OMP_CLAUSE_CHAIN (c))
2201 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2203 tree decl = OMP_CLAUSE_DECL (c);
2205 /* Global variables don't need to be copied,
2206 the receiver side will use them directly. */
2207 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2208 continue;
2209 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2210 || !use_pointer_for_field (decl, ctx))
2211 continue;
2212 tree field = lookup_field (decl, ctx);
2213 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2214 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2215 continue;
2216 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2217 TREE_THIS_VOLATILE (field) = 0;
2218 DECL_USER_ALIGN (field) = 0;
2219 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2220 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2221 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2222 if (ctx->srecord_type)
2224 tree sfield = lookup_sfield (decl, ctx);
2225 TREE_TYPE (sfield) = TREE_TYPE (field);
2226 TREE_THIS_VOLATILE (sfield) = 0;
2227 DECL_USER_ALIGN (sfield) = 0;
2228 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2229 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2230 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2235 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2237 layout_type (ctx->record_type);
2238 fixup_child_record_type (ctx);
2240 else
2242 location_t loc = gimple_location (ctx->stmt);
2243 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2244 /* Move VLA fields to the end. */
2245 p = &TYPE_FIELDS (ctx->record_type);
2246 while (*p)
2247 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2248 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2250 *q = *p;
2251 *p = TREE_CHAIN (*p);
2252 TREE_CHAIN (*q) = NULL_TREE;
2253 q = &TREE_CHAIN (*q);
2255 else
2256 p = &DECL_CHAIN (*p);
2257 *p = vla_fields;
2258 layout_type (ctx->record_type);
2259 fixup_child_record_type (ctx);
2260 if (ctx->srecord_type)
2261 layout_type (ctx->srecord_type);
2262 tree t = fold_convert_loc (loc, long_integer_type_node,
2263 TYPE_SIZE_UNIT (ctx->record_type));
2264 gimple_omp_task_set_arg_size (ctx->stmt, t);
2265 t = build_int_cst (long_integer_type_node,
2266 TYPE_ALIGN_UNIT (ctx->record_type));
2267 gimple_omp_task_set_arg_align (ctx->stmt, t);
2272 /* Scan an OpenMP loop directive. */
2274 static void
2275 scan_omp_for (gimple_omp_for stmt, omp_context *outer_ctx)
2277 omp_context *ctx;
2278 size_t i;
2280 ctx = new_omp_context (stmt, outer_ctx);
2282 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2284 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2285 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2287 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2288 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2289 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2290 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2292 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2295 /* Scan an OpenMP sections directive. */
2297 static void
2298 scan_omp_sections (gimple_omp_sections stmt, omp_context *outer_ctx)
2300 omp_context *ctx;
2302 ctx = new_omp_context (stmt, outer_ctx);
2303 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2304 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2307 /* Scan an OpenMP single directive. */
2309 static void
2310 scan_omp_single (gimple_omp_single stmt, omp_context *outer_ctx)
2312 omp_context *ctx;
2313 tree name;
2315 ctx = new_omp_context (stmt, outer_ctx);
2316 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2317 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2318 name = create_tmp_var_name (".omp_copy_s");
2319 name = build_decl (gimple_location (stmt),
2320 TYPE_DECL, name, ctx->record_type);
2321 TYPE_NAME (ctx->record_type) = name;
2323 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2324 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2326 if (TYPE_FIELDS (ctx->record_type) == NULL)
2327 ctx->record_type = NULL;
2328 else
2329 layout_type (ctx->record_type);
2332 /* Scan an OpenMP target{, data, update} directive. */
2334 static void
2335 scan_omp_target (gimple_omp_target stmt, omp_context *outer_ctx)
2337 omp_context *ctx;
2338 tree name;
2339 int kind = gimple_omp_target_kind (stmt);
2341 ctx = new_omp_context (stmt, outer_ctx);
2342 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2343 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2344 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2345 name = create_tmp_var_name (".omp_data_t");
2346 name = build_decl (gimple_location (stmt),
2347 TYPE_DECL, name, ctx->record_type);
2348 DECL_ARTIFICIAL (name) = 1;
2349 DECL_NAMELESS (name) = 1;
2350 TYPE_NAME (ctx->record_type) = name;
2351 if (kind == GF_OMP_TARGET_KIND_REGION)
2353 create_omp_child_function (ctx, false);
2354 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2357 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2358 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2360 if (TYPE_FIELDS (ctx->record_type) == NULL)
2361 ctx->record_type = ctx->receiver_decl = NULL;
2362 else
2364 TYPE_FIELDS (ctx->record_type)
2365 = nreverse (TYPE_FIELDS (ctx->record_type));
2366 #ifdef ENABLE_CHECKING
2367 tree field;
2368 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2369 for (field = TYPE_FIELDS (ctx->record_type);
2370 field;
2371 field = DECL_CHAIN (field))
2372 gcc_assert (DECL_ALIGN (field) == align);
2373 #endif
2374 layout_type (ctx->record_type);
2375 if (kind == GF_OMP_TARGET_KIND_REGION)
2376 fixup_child_record_type (ctx);
2380 /* Scan an OpenMP teams directive. */
2382 static void
2383 scan_omp_teams (gimple_omp_teams stmt, omp_context *outer_ctx)
2385 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2386 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2387 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2390 /* Check OpenMP nesting restrictions. */
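/* Illustrative cases rejected or warned about below:

     #pragma omp for
     for (...)
       {
         #pragma omp single      /* work-sharing nested in work-sharing */
         ...
       }

   or a "#pragma omp ordered" whose closest enclosing loop region lacks
   an ordered clause.  */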
2391 static bool
2392 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2394 if (ctx != NULL)
2396 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2397 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2399 error_at (gimple_location (stmt),
2400 "OpenMP constructs may not be nested inside simd region");
2401 return false;
2403 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2405 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2406 || (gimple_omp_for_kind (stmt)
2407 != GF_OMP_FOR_KIND_DISTRIBUTE))
2408 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2410 error_at (gimple_location (stmt),
2411 "only distribute or parallel constructs are allowed to "
2412 "be closely nested inside teams construct");
2413 return false;
2417 switch (gimple_code (stmt))
2419 case GIMPLE_OMP_FOR:
2420 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2421 return true;
2422 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2424 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2426 error_at (gimple_location (stmt),
2427 "distribute construct must be closely nested inside "
2428 "teams construct");
2429 return false;
2431 return true;
2433 /* FALLTHRU */
2434 case GIMPLE_CALL:
2435 if (is_gimple_call (stmt)
2436 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2437 == BUILT_IN_GOMP_CANCEL
2438 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2439 == BUILT_IN_GOMP_CANCELLATION_POINT))
2441 const char *bad = NULL;
2442 const char *kind = NULL;
2443 if (ctx == NULL)
2445 error_at (gimple_location (stmt), "orphaned %qs construct",
2446 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2447 == BUILT_IN_GOMP_CANCEL
2448 ? "#pragma omp cancel"
2449 : "#pragma omp cancellation point");
2450 return false;
2452 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2453 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2454 : 0)
2456 case 1:
2457 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2458 bad = "#pragma omp parallel";
2459 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2460 == BUILT_IN_GOMP_CANCEL
2461 && !integer_zerop (gimple_call_arg (stmt, 1)))
2462 ctx->cancellable = true;
2463 kind = "parallel";
2464 break;
2465 case 2:
2466 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2467 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2468 bad = "#pragma omp for";
2469 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2470 == BUILT_IN_GOMP_CANCEL
2471 && !integer_zerop (gimple_call_arg (stmt, 1)))
2473 ctx->cancellable = true;
2474 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2475 OMP_CLAUSE_NOWAIT))
2476 warning_at (gimple_location (stmt), 0,
2477 "%<#pragma omp cancel for%> inside "
2478 "%<nowait%> for construct");
2479 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2480 OMP_CLAUSE_ORDERED))
2481 warning_at (gimple_location (stmt), 0,
2482 "%<#pragma omp cancel for%> inside "
2483 "%<ordered%> for construct");
2485 kind = "for";
2486 break;
2487 case 4:
2488 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2489 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2490 bad = "#pragma omp sections";
2491 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2492 == BUILT_IN_GOMP_CANCEL
2493 && !integer_zerop (gimple_call_arg (stmt, 1)))
2495 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2497 ctx->cancellable = true;
2498 if (find_omp_clause (gimple_omp_sections_clauses
2499 (ctx->stmt),
2500 OMP_CLAUSE_NOWAIT))
2501 warning_at (gimple_location (stmt), 0,
2502 "%<#pragma omp cancel sections%> inside "
2503 "%<nowait%> sections construct");
2505 else
2507 gcc_assert (ctx->outer
2508 && gimple_code (ctx->outer->stmt)
2509 == GIMPLE_OMP_SECTIONS);
2510 ctx->outer->cancellable = true;
2511 if (find_omp_clause (gimple_omp_sections_clauses
2512 (ctx->outer->stmt),
2513 OMP_CLAUSE_NOWAIT))
2514 warning_at (gimple_location (stmt), 0,
2515 "%<#pragma omp cancel sections%> inside "
2516 "%<nowait%> sections construct");
2519 kind = "sections";
2520 break;
2521 case 8:
2522 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2523 bad = "#pragma omp task";
2524 else
2525 ctx->cancellable = true;
2526 kind = "taskgroup";
2527 break;
2528 default:
2529 error_at (gimple_location (stmt), "invalid arguments");
2530 return false;
2532 if (bad)
2534 error_at (gimple_location (stmt),
2535 "%<%s %s%> construct not closely nested inside of %qs",
2536 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2537 == BUILT_IN_GOMP_CANCEL
2538 ? "#pragma omp cancel"
2539 : "#pragma omp cancellation point", kind, bad);
2540 return false;
2543 /* FALLTHRU */
2544 case GIMPLE_OMP_SECTIONS:
2545 case GIMPLE_OMP_SINGLE:
2546 for (; ctx != NULL; ctx = ctx->outer)
2547 switch (gimple_code (ctx->stmt))
2549 case GIMPLE_OMP_FOR:
2550 case GIMPLE_OMP_SECTIONS:
2551 case GIMPLE_OMP_SINGLE:
2552 case GIMPLE_OMP_ORDERED:
2553 case GIMPLE_OMP_MASTER:
2554 case GIMPLE_OMP_TASK:
2555 case GIMPLE_OMP_CRITICAL:
2556 if (is_gimple_call (stmt))
2558 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2559 != BUILT_IN_GOMP_BARRIER)
2560 return true;
2561 error_at (gimple_location (stmt),
2562 "barrier region may not be closely nested inside "
2563 "of work-sharing, critical, ordered, master or "
2564 "explicit task region");
2565 return false;
2567 error_at (gimple_location (stmt),
2568 "work-sharing region may not be closely nested inside "
2569 "of work-sharing, critical, ordered, master or explicit "
2570 "task region");
2571 return false;
2572 case GIMPLE_OMP_PARALLEL:
2573 return true;
2574 default:
2575 break;
2577 break;
2578 case GIMPLE_OMP_MASTER:
2579 for (; ctx != NULL; ctx = ctx->outer)
2580 switch (gimple_code (ctx->stmt))
2582 case GIMPLE_OMP_FOR:
2583 case GIMPLE_OMP_SECTIONS:
2584 case GIMPLE_OMP_SINGLE:
2585 case GIMPLE_OMP_TASK:
2586 error_at (gimple_location (stmt),
2587 "master region may not be closely nested inside "
2588 "of work-sharing or explicit task region");
2589 return false;
2590 case GIMPLE_OMP_PARALLEL:
2591 return true;
2592 default:
2593 break;
2595 break;
2596 case GIMPLE_OMP_ORDERED:
2597 for (; ctx != NULL; ctx = ctx->outer)
2598 switch (gimple_code (ctx->stmt))
2600 case GIMPLE_OMP_CRITICAL:
2601 case GIMPLE_OMP_TASK:
2602 error_at (gimple_location (stmt),
2603 "ordered region may not be closely nested inside "
2604 "of critical or explicit task region");
2605 return false;
2606 case GIMPLE_OMP_FOR:
2607 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2608 OMP_CLAUSE_ORDERED) == NULL)
2610 error_at (gimple_location (stmt),
2611 "ordered region must be closely nested inside "
2612 "a loop region with an ordered clause");
2613 return false;
2615 return true;
2616 case GIMPLE_OMP_PARALLEL:
2617 error_at (gimple_location (stmt),
2618 "ordered region must be closely nested inside "
2619 "a loop region with an ordered clause");
2620 return false;
2621 default:
2622 break;
2624 break;
2625 case GIMPLE_OMP_CRITICAL:
2627 tree this_stmt_name =
2628 gimple_omp_critical_name (as_a <gimple_omp_critical> (stmt));
2629 for (; ctx != NULL; ctx = ctx->outer)
2630 if (gimple_omp_critical other_crit =
2631 dyn_cast <gimple_omp_critical> (ctx->stmt))
2632 if (this_stmt_name == gimple_omp_critical_name (other_crit))
2634 error_at (gimple_location (stmt),
2635 "critical region may not be nested inside a critical "
2636 "region with the same name");
2637 return false;
2640 break;
2641 case GIMPLE_OMP_TEAMS:
2642 if (ctx == NULL
2643 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2644 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2646 error_at (gimple_location (stmt),
2647 "teams construct not closely nested inside of target "
2648 "region");
2649 return false;
2651 break;
2652 case GIMPLE_OMP_TARGET:
2653 for (; ctx != NULL; ctx = ctx->outer)
2654 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
2655 && gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_REGION)
2657 const char *name;
2658 switch (gimple_omp_target_kind (stmt))
2660 case GF_OMP_TARGET_KIND_REGION: name = "target"; break;
2661 case GF_OMP_TARGET_KIND_DATA: name = "target data"; break;
2662 case GF_OMP_TARGET_KIND_UPDATE: name = "target update"; break;
2663 default: gcc_unreachable ();
2665 warning_at (gimple_location (stmt), 0,
2666 "%s construct inside of target region", name);
2668 break;
2669 default:
2670 break;
2672 return true;
2676 /* Helper function for scan_omp.
2678 Callback for walk_tree, and for the operand walk of walk_gimple_stmt,
2679 used to scan for OpenMP directives in TP. */
2681 static tree
2682 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2684 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2685 omp_context *ctx = (omp_context *) wi->info;
2686 tree t = *tp;
2688 switch (TREE_CODE (t))
2690 case VAR_DECL:
2691 case PARM_DECL:
2692 case LABEL_DECL:
2693 case RESULT_DECL:
2694 if (ctx)
2695 *tp = remap_decl (t, &ctx->cb);
2696 break;
2698 default:
2699 if (ctx && TYPE_P (t))
2700 *tp = remap_type (t, &ctx->cb);
2701 else if (!DECL_P (t))
2703 *walk_subtrees = 1;
2704 if (ctx)
2706 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2707 if (tem != TREE_TYPE (t))
2709 if (TREE_CODE (t) == INTEGER_CST)
2710 *tp = wide_int_to_tree (tem, t);
2711 else
2712 TREE_TYPE (t) = tem;
2716 break;
2719 return NULL_TREE;
2722 /* Return true if FNDECL is a setjmp or a longjmp. */
2724 static bool
2725 setjmp_or_longjmp_p (const_tree fndecl)
2727 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2728 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2729 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2730 return true;
2732 tree declname = DECL_NAME (fndecl);
2733 if (!declname)
2734 return false;
2735 const char *name = IDENTIFIER_POINTER (declname);
2736 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2740 /* Helper function for scan_omp.
2742 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2743 the current statement in GSI. */
2745 static tree
2746 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2747 struct walk_stmt_info *wi)
2749 gimple stmt = gsi_stmt (*gsi);
2750 omp_context *ctx = (omp_context *) wi->info;
2752 if (gimple_has_location (stmt))
2753 input_location = gimple_location (stmt);
2755 /* Check the OpenMP nesting restrictions. */
2756 bool remove = false;
2757 if (is_gimple_omp (stmt))
2758 remove = !check_omp_nesting_restrictions (stmt, ctx);
2759 else if (is_gimple_call (stmt))
2761 tree fndecl = gimple_call_fndecl (stmt);
2762 if (fndecl)
2764 if (setjmp_or_longjmp_p (fndecl)
2765 && ctx
2766 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2767 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2769 remove = true;
2770 error_at (gimple_location (stmt),
2771 "setjmp/longjmp inside simd construct");
2773 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2774 switch (DECL_FUNCTION_CODE (fndecl))
2776 case BUILT_IN_GOMP_BARRIER:
2777 case BUILT_IN_GOMP_CANCEL:
2778 case BUILT_IN_GOMP_CANCELLATION_POINT:
2779 case BUILT_IN_GOMP_TASKYIELD:
2780 case BUILT_IN_GOMP_TASKWAIT:
2781 case BUILT_IN_GOMP_TASKGROUP_START:
2782 case BUILT_IN_GOMP_TASKGROUP_END:
2783 remove = !check_omp_nesting_restrictions (stmt, ctx);
2784 break;
2785 default:
2786 break;
2790 if (remove)
2792 stmt = gimple_build_nop ();
2793 gsi_replace (gsi, stmt, false);
2796 *handled_ops_p = true;
2798 switch (gimple_code (stmt))
2800 case GIMPLE_OMP_PARALLEL:
2801 taskreg_nesting_level++;
2802 scan_omp_parallel (gsi, ctx);
2803 taskreg_nesting_level--;
2804 break;
2806 case GIMPLE_OMP_TASK:
2807 taskreg_nesting_level++;
2808 scan_omp_task (gsi, ctx);
2809 taskreg_nesting_level--;
2810 break;
2812 case GIMPLE_OMP_FOR:
2813 scan_omp_for (as_a <gimple_omp_for> (stmt), ctx);
2814 break;
2816 case GIMPLE_OMP_SECTIONS:
2817 scan_omp_sections (as_a <gimple_omp_sections> (stmt), ctx);
2818 break;
2820 case GIMPLE_OMP_SINGLE:
2821 scan_omp_single (as_a <gimple_omp_single> (stmt), ctx);
2822 break;
2824 case GIMPLE_OMP_SECTION:
2825 case GIMPLE_OMP_MASTER:
2826 case GIMPLE_OMP_TASKGROUP:
2827 case GIMPLE_OMP_ORDERED:
2828 case GIMPLE_OMP_CRITICAL:
2829 ctx = new_omp_context (stmt, ctx);
2830 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2831 break;
2833 case GIMPLE_OMP_TARGET:
2834 scan_omp_target (as_a <gimple_omp_target> (stmt), ctx);
2835 break;
2837 case GIMPLE_OMP_TEAMS:
2838 scan_omp_teams (as_a <gimple_omp_teams> (stmt), ctx);
2839 break;
2841 case GIMPLE_BIND:
2843 tree var;
2845 *handled_ops_p = false;
2846 if (ctx)
2847 for (var = gimple_bind_vars (as_a <gimple_bind> (stmt));
2848 var ;
2849 var = DECL_CHAIN (var))
2850 insert_decl_map (&ctx->cb, var, var);
2852 break;
2853 default:
2854 *handled_ops_p = false;
2855 break;
2858 return NULL_TREE;
2862 /* Scan all the statements starting at the current statement. CTX
2863 contains context information about the OpenMP directives and
2864 clauses found during the scan. */
2866 static void
2867 scan_omp (gimple_seq *body_p, omp_context *ctx)
2869 location_t saved_location;
2870 struct walk_stmt_info wi;
2872 memset (&wi, 0, sizeof (wi));
2873 wi.info = ctx;
2874 wi.want_locations = true;
2876 saved_location = input_location;
2877 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2878 input_location = saved_location;
2881 /* Re-gimplification and code generation routines. */
2883 /* Build a call to GOMP_barrier. */
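/* With a non-NULL LHS this emits GOMP_barrier_cancel instead, whose
   result tells whether cancellation has been observed, roughly:

     <lhs> = GOMP_barrier_cancel ();  */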
2885 static gimple
2886 build_omp_barrier (tree lhs)
2888 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2889 : BUILT_IN_GOMP_BARRIER);
2890 gimple_call g = gimple_build_call (fndecl, 0);
2891 if (lhs)
2892 gimple_call_set_lhs (g, lhs);
2893 return g;
2896 /* If a context was created for STMT when it was scanned, return it. */
2898 static omp_context *
2899 maybe_lookup_ctx (gimple stmt)
2901 splay_tree_node n;
2902 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2903 return n ? (omp_context *) n->value : NULL;
2907 /* Find the mapping for DECL in CTX or the immediately enclosing
2908 context that has a mapping for DECL.
2910 If CTX is a nested parallel directive, we may have to use the decl
2911 mappings created in CTX's parent context. Suppose that we have the
2912 following parallel nesting (variable UIDs shown for clarity):
2914 iD.1562 = 0;
2915 #omp parallel shared(iD.1562) -> outer parallel
2916 iD.1562 = iD.1562 + 1;
2918 #omp parallel shared (iD.1562) -> inner parallel
2919 iD.1562 = iD.1562 - 1;
2921 Each parallel structure will create a distinct .omp_data_s structure
2922 for copying iD.1562 in/out of the directive:
2924 outer parallel .omp_data_s.1.i -> iD.1562
2925 inner parallel .omp_data_s.2.i -> iD.1562
2927 A shared variable mapping will produce a copy-out operation before
2928 the parallel directive and a copy-in operation after it. So, in
2929 this case we would have:
2931 iD.1562 = 0;
2932 .omp_data_o.1.i = iD.1562;
2933 #omp parallel shared(iD.1562) -> outer parallel
2934 .omp_data_i.1 = &.omp_data_o.1
2935 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2937 .omp_data_o.2.i = iD.1562; -> **
2938 #omp parallel shared(iD.1562) -> inner parallel
2939 .omp_data_i.2 = &.omp_data_o.2
2940 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2943 ** This is a problem. The symbol iD.1562 cannot be referenced
2944 inside the body of the outer parallel region. But since we are
2945 emitting this copy operation while expanding the inner parallel
2946 directive, we need to access the CTX structure of the outer
2947 parallel directive to get the correct mapping:
2949 .omp_data_o.2.i = .omp_data_i.1->i
2951 Since there may be other workshare or parallel directives enclosing
2952 the parallel directive, it may be necessary to walk up the context
2953 parent chain. This is not a problem in general because nested
2954 parallelism happens only rarely. */
2956 static tree
2957 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2959 tree t;
2960 omp_context *up;
2962 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2963 t = maybe_lookup_decl (decl, up);
2965 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2967 return t ? t : decl;
2971 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2972 in outer contexts. */
2974 static tree
2975 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2977 tree t = NULL;
2978 omp_context *up;
2980 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2981 t = maybe_lookup_decl (decl, up);
2983 return t ? t : decl;
2987 /* Construct the initialization value for reduction CLAUSE. */
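/* E.g. reduction(+:x), reduction(|:x) and reduction(^:x) initialize
   the private copy to 0; reduction(*:x) and reduction(&&:x) to 1;
   reduction(&:x) to ~0; reduction(max:x) to the minimum value of the
   type (or -inf when infinities are honored for floats); and
   reduction(min:x) symmetrically to the maximum.  */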
2989 tree
2990 omp_reduction_init (tree clause, tree type)
2992 location_t loc = OMP_CLAUSE_LOCATION (clause);
2993 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2995 case PLUS_EXPR:
2996 case MINUS_EXPR:
2997 case BIT_IOR_EXPR:
2998 case BIT_XOR_EXPR:
2999 case TRUTH_OR_EXPR:
3000 case TRUTH_ORIF_EXPR:
3001 case TRUTH_XOR_EXPR:
3002 case NE_EXPR:
3003 return build_zero_cst (type);
3005 case MULT_EXPR:
3006 case TRUTH_AND_EXPR:
3007 case TRUTH_ANDIF_EXPR:
3008 case EQ_EXPR:
3009 return fold_convert_loc (loc, type, integer_one_node);
3011 case BIT_AND_EXPR:
3012 return fold_convert_loc (loc, type, integer_minus_one_node);
3014 case MAX_EXPR:
3015 if (SCALAR_FLOAT_TYPE_P (type))
3017 REAL_VALUE_TYPE max, min;
3018 if (HONOR_INFINITIES (TYPE_MODE (type)))
3020 real_inf (&max);
3021 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3023 else
3024 real_maxval (&min, 1, TYPE_MODE (type));
3025 return build_real (type, min);
3027 else
3029 gcc_assert (INTEGRAL_TYPE_P (type));
3030 return TYPE_MIN_VALUE (type);
3033 case MIN_EXPR:
3034 if (SCALAR_FLOAT_TYPE_P (type))
3036 REAL_VALUE_TYPE max;
3037 if (HONOR_INFINITIES (TYPE_MODE (type)))
3038 real_inf (&max);
3039 else
3040 real_maxval (&max, 0, TYPE_MODE (type));
3041 return build_real (type, max);
3043 else
3045 gcc_assert (INTEGRAL_TYPE_P (type));
3046 return TYPE_MAX_VALUE (type);
3049 default:
3050 gcc_unreachable ();
3054 /* Return the alignment to be assumed for the variable in CLAUSE, which
3055 must be OMP_CLAUSE_ALIGNED. */
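/* E.g. "aligned (p : 32)" yields 32; plain "aligned (p)" yields the
   largest alignment any preferred target SIMD vector type may want,
   e.g. 16 on a target with only 16-byte vectors (illustrative,
   target dependent).  */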
3057 static tree
3058 omp_clause_aligned_alignment (tree clause)
3060 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3061 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3063 /* Otherwise return an implementation-defined alignment. */
3064 unsigned int al = 1;
3065 enum machine_mode mode, vmode;
3066 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3067 if (vs)
3068 vs = 1 << floor_log2 (vs);
3069 static enum mode_class classes[]
3070 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3071 for (int i = 0; i < 4; i += 2)
3072 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3073 mode != VOIDmode;
3074 mode = GET_MODE_WIDER_MODE (mode))
3076 vmode = targetm.vectorize.preferred_simd_mode (mode);
3077 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3078 continue;
3079 while (vs
3080 && GET_MODE_SIZE (vmode) < vs
3081 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3082 vmode = GET_MODE_2XWIDER_MODE (vmode);
3084 tree type = lang_hooks.types.type_for_mode (mode, 1);
3085 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3086 continue;
3087 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3088 / GET_MODE_SIZE (mode));
3089 if (TYPE_MODE (type) != vmode)
3090 continue;
3091 if (TYPE_ALIGN_UNIT (type) > al)
3092 al = TYPE_ALIGN_UNIT (type);
3094 return build_int_cst (integer_type_node, al);
3097 /* Return maximum possible vectorization factor for the target. */
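/* Illustrative: on a target whose autovectorize_vector_sizes hook
   reports only 16-byte vectors this returns 16; with vectorization
   disabled (or when optimizing for debugging) it returns 1.  */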
3099 static int
3100 omp_max_vf (void)
3102 if (!optimize
3103 || optimize_debug
3104 || !flag_tree_loop_optimize
3105 || (!flag_tree_loop_vectorize
3106 && (global_options_set.x_flag_tree_loop_vectorize
3107 || global_options_set.x_flag_tree_vectorize)))
3108 return 1;
3110 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3111 if (vs)
3113 vs = 1 << floor_log2 (vs);
3114 return vs;
3116 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3117 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3118 return GET_MODE_NUNITS (vqimode);
3119 return 1;
3122 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3123 privatization. */
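/* A sketch of the transformation done here: a privatized scalar D gets
   a per-lane "omp simd array"

     D_type D.array[max_vf];

   uses inside the vectorized body go through IVAR (D.array[idx]) and
   the scalar fallback through LVAR (D.array[lane]), which is installed
   as D's DECL_VALUE_EXPR.  */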
3125 static bool
3126 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3127 tree &idx, tree &lane, tree &ivar, tree &lvar)
3129 if (max_vf == 0)
3131 max_vf = omp_max_vf ();
3132 if (max_vf > 1)
3134 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3135 OMP_CLAUSE_SAFELEN);
3136 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3137 max_vf = 1;
3138 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3139 max_vf) == -1)
3140 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3142 if (max_vf > 1)
3144 idx = create_tmp_var (unsigned_type_node, NULL);
3145 lane = create_tmp_var (unsigned_type_node, NULL);
3148 if (max_vf == 1)
3149 return false;
3151 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3152 tree avar = create_tmp_var_raw (atype, NULL);
3153 if (TREE_ADDRESSABLE (new_var))
3154 TREE_ADDRESSABLE (avar) = 1;
3155 DECL_ATTRIBUTES (avar)
3156 = tree_cons (get_identifier ("omp simd array"), NULL,
3157 DECL_ATTRIBUTES (avar));
3158 gimple_add_tmp_var (avar);
3159 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3160 NULL_TREE, NULL_TREE);
3161 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3162 NULL_TREE, NULL_TREE);
3163 if (DECL_P (new_var))
3165 SET_DECL_VALUE_EXPR (new_var, lvar);
3166 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3168 return true;
3171 /* Helper function of lower_rec_input_clauses. For a reference used
3172 in a simd reduction, create the underlying variable it will reference. */
3174 static void
3175 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3177 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3178 if (TREE_CONSTANT (z))
3180 const char *name = NULL;
3181 if (DECL_NAME (new_vard))
3182 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3184 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3185 gimple_add_tmp_var (z);
3186 TREE_ADDRESSABLE (z) = 1;
3187 z = build_fold_addr_expr_loc (loc, z);
3188 gimplify_assign (new_vard, z, ilist);
3192 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3193 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3194 private variables. Initialization statements go in ILIST, while calls
3195 to destructors go in DLIST. */
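/* Illustrative: for "firstprivate (x)" the child side receives roughly

     x' = .omp_data_i->x;   /* or a copy-constructor call for C++ */

   appended to ILIST, and a C++ private with a non-trivial destructor
   gets the matching destructor call appended to DLIST.  */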
3197 static void
3198 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3199 omp_context *ctx, struct omp_for_data *fd)
3201 tree c, dtor, copyin_seq, x, ptr;
3202 bool copyin_by_ref = false;
3203 bool lastprivate_firstprivate = false;
3204 bool reduction_omp_orig_ref = false;
3205 int pass;
3206 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3207 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3208 int max_vf = 0;
3209 tree lane = NULL_TREE, idx = NULL_TREE;
3210 tree ivar = NULL_TREE, lvar = NULL_TREE;
3211 gimple_seq llist[2] = { NULL, NULL };
3213 copyin_seq = NULL;
3215 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3216 with data sharing clauses referencing variable sized vars. That
3217 is unnecessarily hard to support and very unlikely to result in
3218 vectorized code anyway. */
3219 if (is_simd)
3220 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3221 switch (OMP_CLAUSE_CODE (c))
3223 case OMP_CLAUSE_LINEAR:
3224 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3225 max_vf = 1;
3226 /* FALLTHRU */
3227 case OMP_CLAUSE_REDUCTION:
3228 case OMP_CLAUSE_PRIVATE:
3229 case OMP_CLAUSE_FIRSTPRIVATE:
3230 case OMP_CLAUSE_LASTPRIVATE:
3231 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3232 max_vf = 1;
3233 break;
3234 default:
3235 continue;
3238 /* Do all the fixed sized types in the first pass, and the variable sized
3239 types in the second pass. This makes sure that the scalar arguments to
3240 the variable sized types are processed before we use them in the
3241 variable sized operations. */
3242 for (pass = 0; pass < 2; ++pass)
3244 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3246 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3247 tree var, new_var;
3248 bool by_ref;
3249 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3251 switch (c_kind)
3253 case OMP_CLAUSE_PRIVATE:
3254 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3255 continue;
3256 break;
3257 case OMP_CLAUSE_SHARED:
3258 /* Ignore shared directives in teams construct. */
3259 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3260 continue;
3261 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3263 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3264 continue;
3266 case OMP_CLAUSE_FIRSTPRIVATE:
3267 case OMP_CLAUSE_COPYIN:
3268 case OMP_CLAUSE_LINEAR:
3269 break;
3270 case OMP_CLAUSE_REDUCTION:
3271 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3272 reduction_omp_orig_ref = true;
3273 break;
3274 case OMP_CLAUSE__LOOPTEMP_:
3275 /* Handle _looptemp_ clauses only on parallel. */
3276 if (fd)
3277 continue;
3278 break;
3279 case OMP_CLAUSE_LASTPRIVATE:
3280 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3282 lastprivate_firstprivate = true;
3283 if (pass != 0)
3284 continue;
3286 /* Even without corresponding firstprivate, if
3287 decl is Fortran allocatable, it needs outer var
3288 reference. */
3289 else if (pass == 0
3290 && lang_hooks.decls.omp_private_outer_ref
3291 (OMP_CLAUSE_DECL (c)))
3292 lastprivate_firstprivate = true;
3293 break;
3294 case OMP_CLAUSE_ALIGNED:
3295 if (pass == 0)
3296 continue;
3297 var = OMP_CLAUSE_DECL (c);
3298 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3299 && !is_global_var (var))
3301 new_var = maybe_lookup_decl (var, ctx);
3302 if (new_var == NULL_TREE)
3303 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3304 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3305 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3306 omp_clause_aligned_alignment (c));
3307 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3308 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3309 gimplify_and_add (x, ilist);
3311 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3312 && is_global_var (var))
3314 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3315 new_var = lookup_decl (var, ctx);
3316 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3317 t = build_fold_addr_expr_loc (clause_loc, t);
3318 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3319 t = build_call_expr_loc (clause_loc, t2, 2, t,
3320 omp_clause_aligned_alignment (c));
3321 t = fold_convert_loc (clause_loc, ptype, t);
3322 x = create_tmp_var (ptype, NULL);
3323 t = build2 (MODIFY_EXPR, ptype, x, t);
3324 gimplify_and_add (t, ilist);
3325 t = build_simple_mem_ref_loc (clause_loc, x);
3326 SET_DECL_VALUE_EXPR (new_var, t);
3327 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3329 continue;
3330 default:
3331 continue;
3334 new_var = var = OMP_CLAUSE_DECL (c);
3335 if (c_kind != OMP_CLAUSE_COPYIN)
3336 new_var = lookup_decl (var, ctx);
3338 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3340 if (pass != 0)
3341 continue;
3343 else if (is_variable_sized (var))
3345 /* For variable sized types, we need to allocate the
3346 actual storage here. Call alloca and store the
3347 result in the pointer decl that we created elsewhere. */
3348 if (pass == 0)
3349 continue;
3351 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3353 gimple_call stmt;
3354 tree tmp, atmp;
3356 ptr = DECL_VALUE_EXPR (new_var);
3357 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3358 ptr = TREE_OPERAND (ptr, 0);
3359 gcc_assert (DECL_P (ptr));
3360 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3362 /* void *tmp = __builtin_alloca (x); */
3363 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3364 stmt = gimple_build_call (atmp, 1, x);
3365 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3366 gimple_add_tmp_var (tmp);
3367 gimple_call_set_lhs (stmt, tmp);
3369 gimple_seq_add_stmt (ilist, stmt);
3371 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3372 gimplify_assign (ptr, x, ilist);
3375 else if (is_reference (var))
3377 /* For references that are being privatized for Fortran,
3378 allocate new backing storage for the new pointer
3379 variable. This allows us to avoid changing all the
3380 code that expects a pointer to something that expects
3381 a direct variable. */
3382 if (pass == 0)
3383 continue;
3385 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3386 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3388 x = build_receiver_ref (var, false, ctx);
3389 x = build_fold_addr_expr_loc (clause_loc, x);
3391 else if (TREE_CONSTANT (x))
3393 /* For a reduction in a SIMD loop, defer adding the
3394 initialization of the reference, because if we decide
3395 to use a SIMD array for it, the initialization could
3396 cause an expansion ICE. */
3397 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3398 x = NULL_TREE;
3399 else
3401 const char *name = NULL;
3402 if (DECL_NAME (new_var))
3403 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3405 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3406 name);
3407 gimple_add_tmp_var (x);
3408 TREE_ADDRESSABLE (x) = 1;
3409 x = build_fold_addr_expr_loc (clause_loc, x);
3412 else
3414 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3415 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3418 if (x)
3420 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3421 gimplify_assign (new_var, x, ilist);
3424 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3426 else if (c_kind == OMP_CLAUSE_REDUCTION
3427 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3429 if (pass == 0)
3430 continue;
3432 else if (pass != 0)
3433 continue;
3435 switch (OMP_CLAUSE_CODE (c))
3437 case OMP_CLAUSE_SHARED:
3438 /* Ignore shared directives in teams construct. */
3439 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3440 continue;
3441 /* Shared global vars are just accessed directly. */
3442 if (is_global_var (new_var))
3443 break;
3444 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3445 needs to be delayed until after fixup_child_record_type so
3446 that we get the correct type during the dereference. */
3447 by_ref = use_pointer_for_field (var, ctx);
3448 x = build_receiver_ref (var, by_ref, ctx);
3449 SET_DECL_VALUE_EXPR (new_var, x);
3450 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3452 /* ??? If VAR is not passed by reference, and the variable
3453 hasn't been initialized yet, then we'll get a warning for
3454 the store into the omp_data_s structure. Ideally, we'd be
3455 able to notice this and not store anything at all, but
3456 we're generating code too early. Suppress the warning. */
3457 if (!by_ref)
3458 TREE_NO_WARNING (var) = 1;
3459 break;
3461 case OMP_CLAUSE_LASTPRIVATE:
3462 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3463 break;
3464 /* FALLTHRU */
3466 case OMP_CLAUSE_PRIVATE:
3467 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3468 x = build_outer_var_ref (var, ctx);
3469 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3471 if (is_task_ctx (ctx))
3472 x = build_receiver_ref (var, false, ctx);
3473 else
3474 x = build_outer_var_ref (var, ctx);
3476 else
3477 x = NULL;
3478 do_private:
3479 tree nx;
3480 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3481 if (is_simd)
3483 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3484 if ((TREE_ADDRESSABLE (new_var) || nx || y
3485 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3486 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3487 idx, lane, ivar, lvar))
3489 if (nx)
3490 x = lang_hooks.decls.omp_clause_default_ctor
3491 (c, unshare_expr (ivar), x);
3492 if (nx && x)
3493 gimplify_and_add (x, &llist[0]);
3494 if (y)
3496 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3497 if (y)
3499 gimple_seq tseq = NULL;
3501 dtor = y;
3502 gimplify_stmt (&dtor, &tseq);
3503 gimple_seq_add_seq (&llist[1], tseq);
3506 break;
3509 if (nx)
3510 gimplify_and_add (nx, ilist);
3511 /* FALLTHRU */
3513 do_dtor:
3514 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3515 if (x)
3517 gimple_seq tseq = NULL;
3519 dtor = x;
3520 gimplify_stmt (&dtor, &tseq);
3521 gimple_seq_add_seq (dlist, tseq);
3523 break;
3525 case OMP_CLAUSE_LINEAR:
3526 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3527 goto do_firstprivate;
3528 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3529 x = NULL;
3530 else
3531 x = build_outer_var_ref (var, ctx);
3532 goto do_private;
3534 case OMP_CLAUSE_FIRSTPRIVATE:
3535 if (is_task_ctx (ctx))
3537 if (is_reference (var) || is_variable_sized (var))
3538 goto do_dtor;
3539 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3540 ctx))
3541 || use_pointer_for_field (var, NULL))
3543 x = build_receiver_ref (var, false, ctx);
3544 SET_DECL_VALUE_EXPR (new_var, x);
3545 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3546 goto do_dtor;
3549 do_firstprivate:
3550 x = build_outer_var_ref (var, ctx);
3551 if (is_simd)
3553 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3554 && gimple_omp_for_combined_into_p (ctx->stmt))
3556 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3557 tree stept = TREE_TYPE (t);
3558 tree ct = find_omp_clause (clauses,
3559 OMP_CLAUSE__LOOPTEMP_);
3560 gcc_assert (ct);
3561 tree l = OMP_CLAUSE_DECL (ct);
3562 tree n1 = fd->loop.n1;
3563 tree step = fd->loop.step;
3564 tree itype = TREE_TYPE (l);
3565 if (POINTER_TYPE_P (itype))
3566 itype = signed_type_for (itype);
3567 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3568 if (TYPE_UNSIGNED (itype)
3569 && fd->loop.cond_code == GT_EXPR)
3570 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3571 fold_build1 (NEGATE_EXPR, itype, l),
3572 fold_build1 (NEGATE_EXPR,
3573 itype, step));
3574 else
3575 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3576 t = fold_build2 (MULT_EXPR, stept,
3577 fold_convert (stept, l), t);
3579 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3581 x = lang_hooks.decls.omp_clause_linear_ctor
3582 (c, new_var, x, t);
3583 gimplify_and_add (x, ilist);
3584 goto do_dtor;
3587 if (POINTER_TYPE_P (TREE_TYPE (x)))
3588 x = fold_build2 (POINTER_PLUS_EXPR,
3589 TREE_TYPE (x), x, t);
3590 else
3591 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3594 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3595 || TREE_ADDRESSABLE (new_var))
3596 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3597 idx, lane, ivar, lvar))
3599 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3601 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3602 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3603 gimplify_and_add (x, ilist);
3604 gimple_stmt_iterator gsi
3605 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3606 gimple_assign g
3607 = gimple_build_assign (unshare_expr (lvar), iv);
3608 gsi_insert_before_without_update (&gsi, g,
3609 GSI_SAME_STMT);
3610 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3611 enum tree_code code = PLUS_EXPR;
3612 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3613 code = POINTER_PLUS_EXPR;
3614 g = gimple_build_assign_with_ops (code, iv, iv, t);
3615 gsi_insert_before_without_update (&gsi, g,
3616 GSI_SAME_STMT);
3617 break;
3619 x = lang_hooks.decls.omp_clause_copy_ctor
3620 (c, unshare_expr (ivar), x);
3621 gimplify_and_add (x, &llist[0]);
3622 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3623 if (x)
3625 gimple_seq tseq = NULL;
3627 dtor = x;
3628 gimplify_stmt (&dtor, &tseq);
3629 gimple_seq_add_seq (&llist[1], tseq);
3631 break;
3634 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3635 gimplify_and_add (x, ilist);
3636 goto do_dtor;
3638 case OMP_CLAUSE__LOOPTEMP_:
3639 gcc_assert (is_parallel_ctx (ctx));
3640 x = build_outer_var_ref (var, ctx);
3641 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3642 gimplify_and_add (x, ilist);
3643 break;
3645 case OMP_CLAUSE_COPYIN:
3646 by_ref = use_pointer_for_field (var, NULL);
3647 x = build_receiver_ref (var, by_ref, ctx);
3648 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3649 append_to_statement_list (x, &copyin_seq);
3650 copyin_by_ref |= by_ref;
3651 break;
3653 case OMP_CLAUSE_REDUCTION:
3654 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3656 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3657 gimple tseq;
3658 x = build_outer_var_ref (var, ctx);
3660 if (is_reference (var)
3661 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3662 TREE_TYPE (x)))
3663 x = build_fold_addr_expr_loc (clause_loc, x);
3664 SET_DECL_VALUE_EXPR (placeholder, x);
3665 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3666 tree new_vard = new_var;
3667 if (is_reference (var))
3669 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3670 new_vard = TREE_OPERAND (new_var, 0);
3671 gcc_assert (DECL_P (new_vard));
3673 if (is_simd
3674 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3675 idx, lane, ivar, lvar))
3677 if (new_vard == new_var)
3679 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3680 SET_DECL_VALUE_EXPR (new_var, ivar);
3682 else
3684 SET_DECL_VALUE_EXPR (new_vard,
3685 build_fold_addr_expr (ivar));
3686 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3688 x = lang_hooks.decls.omp_clause_default_ctor
3689 (c, unshare_expr (ivar),
3690 build_outer_var_ref (var, ctx));
3691 if (x)
3692 gimplify_and_add (x, &llist[0]);
3693 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3695 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3696 lower_omp (&tseq, ctx);
3697 gimple_seq_add_seq (&llist[0], tseq);
3699 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3700 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3701 lower_omp (&tseq, ctx);
3702 gimple_seq_add_seq (&llist[1], tseq);
3703 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3704 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3705 if (new_vard == new_var)
3706 SET_DECL_VALUE_EXPR (new_var, lvar);
3707 else
3708 SET_DECL_VALUE_EXPR (new_vard,
3709 build_fold_addr_expr (lvar));
3710 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3711 if (x)
3713 tseq = NULL;
3714 dtor = x;
3715 gimplify_stmt (&dtor, &tseq);
3716 gimple_seq_add_seq (&llist[1], tseq);
3718 break;
3720 /* If this is a reference to a constant-size reduction var
3721 with a placeholder, we haven't emitted its initializer
3722 yet, because doing so is undesirable if SIMD arrays are
3723 used. But if they aren't used, we need to emit the
3724 deferred initialization now. */
3725 else if (is_reference (var) && is_simd)
3726 handle_simd_reference (clause_loc, new_vard, ilist);
3727 x = lang_hooks.decls.omp_clause_default_ctor
3728 (c, unshare_expr (new_var),
3729 build_outer_var_ref (var, ctx));
3730 if (x)
3731 gimplify_and_add (x, ilist);
3732 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3734 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3735 lower_omp (&tseq, ctx);
3736 gimple_seq_add_seq (ilist, tseq);
3738 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3739 if (is_simd)
3741 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3742 lower_omp (&tseq, ctx);
3743 gimple_seq_add_seq (dlist, tseq);
3744 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3746 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3747 goto do_dtor;
3749 else
3751 x = omp_reduction_init (c, TREE_TYPE (new_var));
3752 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3753 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3755 /* reduction(-:var) sums up the partial results, so it
3756 acts identically to reduction(+:var). */
3757 if (code == MINUS_EXPR)
3758 code = PLUS_EXPR;
3760 tree new_vard = new_var;
3761 if (is_simd && is_reference (var))
3763 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3764 new_vard = TREE_OPERAND (new_var, 0);
3765 gcc_assert (DECL_P (new_vard));
3767 if (is_simd
3768 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3769 idx, lane, ivar, lvar))
3771 tree ref = build_outer_var_ref (var, ctx);
3773 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3775 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3776 ref = build_outer_var_ref (var, ctx);
3777 gimplify_assign (ref, x, &llist[1]);
3779 if (new_vard != new_var)
3781 SET_DECL_VALUE_EXPR (new_vard,
3782 build_fold_addr_expr (lvar));
3783 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3786 else
3788 if (is_reference (var) && is_simd)
3789 handle_simd_reference (clause_loc, new_vard, ilist);
3790 gimplify_assign (new_var, x, ilist);
3791 if (is_simd)
3793 tree ref = build_outer_var_ref (var, ctx);
3795 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3796 ref = build_outer_var_ref (var, ctx);
3797 gimplify_assign (ref, x, dlist);
3801 break;
3803 default:
3804 gcc_unreachable ();
3809 if (lane)
3811 tree uid = create_tmp_var (ptr_type_node, "simduid");
3812 /* Don't want uninit warnings on simduid; it is always uninitialized,
3813 since we use it only for its DECL_UID, never for its value. */
3814 TREE_NO_WARNING (uid) = 1;
3815 gimple g
3816 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3817 gimple_call_set_lhs (g, lane);
3818 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3819 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3820 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3821 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3822 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3823 gimple_omp_for_set_clauses (ctx->stmt, c);
3824 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3825 build_int_cst (unsigned_type_node, 0),
3826 NULL_TREE);
3827 gimple_seq_add_stmt (ilist, g);
3828 for (int i = 0; i < 2; i++)
3829 if (llist[i])
3831 tree vf = create_tmp_var (unsigned_type_node, NULL);
3832 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3833 gimple_call_set_lhs (g, vf);
3834 gimple_seq *seq = i == 0 ? ilist : dlist;
3835 gimple_seq_add_stmt (seq, g);
3836 tree t = build_int_cst (unsigned_type_node, 0);
3837 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3838 gimple_seq_add_stmt (seq, g);
3839 tree body = create_artificial_label (UNKNOWN_LOCATION);
3840 tree header = create_artificial_label (UNKNOWN_LOCATION);
3841 tree end = create_artificial_label (UNKNOWN_LOCATION);
3842 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3843 gimple_seq_add_stmt (seq, gimple_build_label (body));
3844 gimple_seq_add_seq (seq, llist[i]);
3845 t = build_int_cst (unsigned_type_node, 1);
3846 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3847 gimple_seq_add_stmt (seq, g);
3848 gimple_seq_add_stmt (seq, gimple_build_label (header));
3849 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3850 gimple_seq_add_stmt (seq, g);
3851 gimple_seq_add_stmt (seq, gimple_build_label (end));
3855 /* The copyin sequence is not to be executed by the main thread, since
3856 that would result in self-copies. That may be harmless for scalars,
3857 but it certainly is not for C++ operator=. */
3858 if (copyin_seq)
3860 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3862 x = build2 (NE_EXPR, boolean_type_node, x,
3863 build_int_cst (TREE_TYPE (x), 0));
3864 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3865 gimplify_and_add (x, ilist);
3868 /* If any copyin variable is passed by reference, we must ensure the
3869 master thread doesn't modify it before it is copied over in all
3870 threads. Similarly for variables in both firstprivate and
3871 lastprivate clauses we need to ensure the lastprivate copying
3872 happens after firstprivate copying in all threads. And similarly
3873 for UDRs if the initializer expression refers to omp_orig. */
3874 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3876 /* Don't add any barrier for #pragma omp simd or
3877 #pragma omp distribute. */
3878 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3879 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3880 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3883 /* If max_vf is non-zero, then we can use only a vectorization factor
3884 up to the max_vf we chose. So stick it into the safelen clause. */
3885 if (max_vf)
3887 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3888 OMP_CLAUSE_SAFELEN);
3889 if (c == NULL_TREE
3890 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
3891 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3892 max_vf) == 1))
3894 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3895 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3896 max_vf);
3897 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3898 gimple_omp_for_set_clauses (ctx->stmt, c);
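/* An editorial sketch, not verbatim compiler output: if max_vf was
   capped at, say, 8 while privatizing variables for SIMD, then a loop
   such as

     #pragma omp simd safelen(16)
     for (i = 0; i < n; i++)
       a[i] += b[i];

   gets a new safelen(8) clause prepended here, so later passes never
   assume a vectorization factor larger than the private arrays allow.  */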
3904 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3905 both parallel and workshare constructs. PREDICATE may be NULL if it's
3906 always true. */
3908 static void
3909 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3910 omp_context *ctx)
3912 tree x, c, label = NULL, orig_clauses = clauses;
3913 bool par_clauses = false;
3914 tree simduid = NULL, lastlane = NULL;
3916 /* Early exit if there are no lastprivate or linear clauses. */
3917 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3918 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3919 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3920 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3921 break;
3922 if (clauses == NULL)
3924 /* If this was a workshare clause, see if it had been combined
3925 with its parallel. In that case, look for the clauses on the
3926 parallel statement itself. */
3927 if (is_parallel_ctx (ctx))
3928 return;
3930 ctx = ctx->outer;
3931 if (ctx == NULL || !is_parallel_ctx (ctx))
3932 return;
3934 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3935 OMP_CLAUSE_LASTPRIVATE);
3936 if (clauses == NULL)
3937 return;
3938 par_clauses = true;
3941 if (predicate)
3943 gimple_cond stmt;
3944 tree label_true, arm1, arm2;
3946 label = create_artificial_label (UNKNOWN_LOCATION);
3947 label_true = create_artificial_label (UNKNOWN_LOCATION);
3948 arm1 = TREE_OPERAND (predicate, 0);
3949 arm2 = TREE_OPERAND (predicate, 1);
3950 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3951 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3952 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3953 label_true, label);
3954 gimple_seq_add_stmt (stmt_list, stmt);
3955 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3958 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3959 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3961 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3962 if (simduid)
3963 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3966 for (c = clauses; c ;)
3968 tree var, new_var;
3969 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3971 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3972 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3973 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3975 var = OMP_CLAUSE_DECL (c);
3976 new_var = lookup_decl (var, ctx);
3978 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3980 tree val = DECL_VALUE_EXPR (new_var);
3981 if (TREE_CODE (val) == ARRAY_REF
3982 && VAR_P (TREE_OPERAND (val, 0))
3983 && lookup_attribute ("omp simd array",
3984 DECL_ATTRIBUTES (TREE_OPERAND (val,
3985 0))))
3987 if (lastlane == NULL)
3989 lastlane = create_tmp_var (unsigned_type_node, NULL);
3990 gimple_call g
3991 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3992 2, simduid,
3993 TREE_OPERAND (val, 1));
3994 gimple_call_set_lhs (g, lastlane);
3995 gimple_seq_add_stmt (stmt_list, g);
3997 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3998 TREE_OPERAND (val, 0), lastlane,
3999 NULL_TREE, NULL_TREE);
4003 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4004 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
4006 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
4007 gimple_seq_add_seq (stmt_list,
4008 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
4009 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
4011 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4012 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
4014 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
4015 gimple_seq_add_seq (stmt_list,
4016 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
4017 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
4020 x = build_outer_var_ref (var, ctx);
4021 if (is_reference (var))
4022 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4023 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
4024 gimplify_and_add (x, stmt_list);
4026 c = OMP_CLAUSE_CHAIN (c);
4027 if (c == NULL && !par_clauses)
4029 /* If this was a workshare clause, see if it had been combined
4030 with its parallel. In that case, continue looking for the
4031 clauses also on the parallel statement itself. */
4032 if (is_parallel_ctx (ctx))
4033 break;
4035 ctx = ctx->outer;
4036 if (ctx == NULL || !is_parallel_ctx (ctx))
4037 break;
4039 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4040 OMP_CLAUSE_LASTPRIVATE);
4041 par_clauses = true;
4045 if (label)
4046 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
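/* An editorial sketch of the sequence built above, assuming a predicate
   and a single lastprivate(x) clause (names are illustrative):

     if (arm1 OP arm2) goto label_true; else goto label;
     label_true:
       x_outer = x_private;	   // via the omp_clause_assign_op hook
     label:

   For SIMD loops, x_private is instead read from the "omp simd array"
   at the lane returned by GOMP_SIMD_LAST_LANE.  */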
4050 /* Generate code to implement the REDUCTION clauses. */
4052 static void
4053 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
4055 gimple_seq sub_seq = NULL;
4056 gimple stmt;
4057 tree x, c;
4058 int count = 0;
4060 /* SIMD reductions are handled in lower_rec_input_clauses. */
4061 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4062 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4063 return;
4065 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4066 update in that case, otherwise use a lock. */
4067 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
4068 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
4070 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4072 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4073 count = -1;
4074 break;
4076 count++;
4079 if (count == 0)
4080 return;
4082 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4084 tree var, ref, new_var;
4085 enum tree_code code;
4086 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4088 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
4089 continue;
4091 var = OMP_CLAUSE_DECL (c);
4092 new_var = lookup_decl (var, ctx);
4093 if (is_reference (var))
4094 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4095 ref = build_outer_var_ref (var, ctx);
4096 code = OMP_CLAUSE_REDUCTION_CODE (c);
4098 /* reduction(-:var) sums up the partial results, so it acts
4099 identically to reduction(+:var). */
4100 if (code == MINUS_EXPR)
4101 code = PLUS_EXPR;
4103 if (count == 1)
4105 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
4107 addr = save_expr (addr);
4108 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
4109 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
4110 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
4111 gimplify_and_add (x, stmt_seqp);
4112 return;
4115 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4117 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4119 if (is_reference (var)
4120 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4121 TREE_TYPE (ref)))
4122 ref = build_fold_addr_expr_loc (clause_loc, ref);
4123 SET_DECL_VALUE_EXPR (placeholder, ref);
4124 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4125 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
4126 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
4127 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4128 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
4130 else
4132 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4133 ref = build_outer_var_ref (var, ctx);
4134 gimplify_assign (ref, x, &sub_seq);
4138 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4139 0);
4140 gimple_seq_add_stmt (stmt_seqp, stmt);
4142 gimple_seq_add_seq (stmt_seqp, sub_seq);
4144 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4145 0);
4146 gimple_seq_add_stmt (stmt_seqp, stmt);
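/* An editorial sketch: with a single reduction(+:s) clause the merge
   above is emitted as an atomic update, roughly

     #pragma omp atomic
     s_outer = s_outer + s_private;

   whereas with two or more reduction clauses (or a UDR) the merges are
   bracketed by GOMP_atomic_start () / GOMP_atomic_end () instead.  */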
4150 /* Generate code to implement the COPYPRIVATE clauses. */
4152 static void
4153 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4154 omp_context *ctx)
4156 tree c;
4158 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4160 tree var, new_var, ref, x;
4161 bool by_ref;
4162 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4164 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4165 continue;
4167 var = OMP_CLAUSE_DECL (c);
4168 by_ref = use_pointer_for_field (var, NULL);
4170 ref = build_sender_ref (var, ctx);
4171 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4172 if (by_ref)
4174 x = build_fold_addr_expr_loc (clause_loc, new_var);
4175 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4177 gimplify_assign (ref, x, slist);
4179 ref = build_receiver_ref (var, false, ctx);
4180 if (by_ref)
4182 ref = fold_convert_loc (clause_loc,
4183 build_pointer_type (TREE_TYPE (new_var)),
4184 ref);
4185 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4187 if (is_reference (var))
4189 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4190 ref = build_simple_mem_ref_loc (clause_loc, ref);
4191 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4193 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4194 gimplify_and_add (x, rlist);
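/* An editorial sketch (record and field names are illustrative): the
   broadcasting thread fills the sender record in SLIST,

     .omp_data_o.x = x;	       // or the address of x, when BY_REF

   and the other threads copy the value back out in RLIST,

     x = .omp_data_i->x;

   going through the omp_clause_assign_op lang hook so that e.g. C++
   copy assignment operators are honoured.  */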
4199 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE
4200 and REDUCTION clauses from the sender (aka parent) side. */
4202 static void
4203 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4204 omp_context *ctx)
4206 tree c;
4208 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4210 tree val, ref, x, var;
4211 bool by_ref, do_in = false, do_out = false;
4212 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4214 switch (OMP_CLAUSE_CODE (c))
4216 case OMP_CLAUSE_PRIVATE:
4217 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4218 break;
4219 continue;
4220 case OMP_CLAUSE_FIRSTPRIVATE:
4221 case OMP_CLAUSE_COPYIN:
4222 case OMP_CLAUSE_LASTPRIVATE:
4223 case OMP_CLAUSE_REDUCTION:
4224 case OMP_CLAUSE__LOOPTEMP_:
4225 break;
4226 default:
4227 continue;
4230 val = OMP_CLAUSE_DECL (c);
4231 var = lookup_decl_in_outer_ctx (val, ctx);
4233 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4234 && is_global_var (var))
4235 continue;
4236 if (is_variable_sized (val))
4237 continue;
4238 by_ref = use_pointer_for_field (val, NULL);
4240 switch (OMP_CLAUSE_CODE (c))
4242 case OMP_CLAUSE_PRIVATE:
4243 case OMP_CLAUSE_FIRSTPRIVATE:
4244 case OMP_CLAUSE_COPYIN:
4245 case OMP_CLAUSE__LOOPTEMP_:
4246 do_in = true;
4247 break;
4249 case OMP_CLAUSE_LASTPRIVATE:
4250 if (by_ref || is_reference (val))
4252 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4253 continue;
4254 do_in = true;
4256 else
4258 do_out = true;
4259 if (lang_hooks.decls.omp_private_outer_ref (val))
4260 do_in = true;
4262 break;
4264 case OMP_CLAUSE_REDUCTION:
4265 do_in = true;
4266 do_out = !(by_ref || is_reference (val));
4267 break;
4269 default:
4270 gcc_unreachable ();
4273 if (do_in)
4275 ref = build_sender_ref (val, ctx);
4276 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4277 gimplify_assign (ref, x, ilist);
4278 if (is_task_ctx (ctx))
4279 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4282 if (do_out)
4284 ref = build_sender_ref (val, ctx);
4285 gimplify_assign (var, ref, olist);
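/* An editorial sketch: for firstprivate (x) the "in" half above stores
   the value (or its address, when BY_REF) into the marshalling record
   before the fork,

     .omp_data_o.x = x;

   and for a by-value reduction the "out" half reads the result back
   after the join,

     x = .omp_data_o.x;

   following the usual .OMP_DATA_* naming convention.  */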
4290 /* Generate code to implement SHARED from the sender (aka parent)
4291 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4292 list things that got automatically shared. */
4294 static void
4295 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4297 tree var, ovar, nvar, f, x, record_type;
4299 if (ctx->record_type == NULL)
4300 return;
4302 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4303 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4305 ovar = DECL_ABSTRACT_ORIGIN (f);
4306 nvar = maybe_lookup_decl (ovar, ctx);
4307 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4308 continue;
4310 /* If CTX is a nested parallel directive, find the immediately
4311 enclosing parallel or workshare construct that contains a
4312 mapping for OVAR. */
4313 var = lookup_decl_in_outer_ctx (ovar, ctx);
4315 if (use_pointer_for_field (ovar, ctx))
4317 x = build_sender_ref (ovar, ctx);
4318 var = build_fold_addr_expr (var);
4319 gimplify_assign (x, var, ilist);
4321 else
4323 x = build_sender_ref (ovar, ctx);
4324 gimplify_assign (x, var, ilist);
4326 if (!TREE_READONLY (var)
4327 /* We don't need to receive a new reference to a result
4328 or parm decl. In fact we must not store to it, as doing so
4329 would invalidate any pending RSO and generate wrong gimple
4330 during inlining. */
4331 && !((TREE_CODE (var) == RESULT_DECL
4332 || TREE_CODE (var) == PARM_DECL)
4333 && DECL_BY_REFERENCE (var)))
4335 x = build_sender_ref (ovar, ctx);
4336 gimplify_assign (var, x, olist);
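/* An editorial sketch: a shared X that lives in the record is sent as

     .omp_data_o.x = x;	       // or = &x when use_pointer_for_field

   and, unless X is read-only or a by-reference RESULT_DECL/PARM_DECL,
   copied back with the reverse assignment after the region.  */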
4343 /* A convenience function to build an empty GIMPLE_COND with just the
4344 condition. */
4346 static gimple_cond
4347 gimple_build_cond_empty (tree cond)
4349 enum tree_code pred_code;
4350 tree lhs, rhs;
4352 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4353 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
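/* For example, gimple_build_cond_empty (build2 (LT_EXPR,
   boolean_type_node, a, b)) yields "if (a < b)" with both branch
   targets left NULL; the actual destinations are supplied later
   through CFG edges.  */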
4357 /* Build the function calls to GOMP_parallel_start etc to actually
4358 generate the parallel operation. REGION is the parallel region
4359 being expanded. BB is the block where the code should be inserted.
4360 WS_ARGS will be set if this is a call to a combined parallel+workshare
4361 construct; it contains the list of additional arguments needed by
4362 the workshare construct. */
4364 static void
4365 expand_parallel_call (struct omp_region *region, basic_block bb,
4366 gimple_omp_parallel entry_stmt,
4367 vec<tree, va_gc> *ws_args)
4369 tree t, t1, t2, val, cond, c, clauses, flags;
4370 gimple_stmt_iterator gsi;
4371 gimple stmt;
4372 enum built_in_function start_ix;
4373 int start_ix2;
4374 location_t clause_loc;
4375 vec<tree, va_gc> *args;
4377 clauses = gimple_omp_parallel_clauses (entry_stmt);
4379 /* Determine what flavor of GOMP_parallel we will be
4380 emitting. */
4381 start_ix = BUILT_IN_GOMP_PARALLEL;
4382 if (is_combined_parallel (region))
4384 switch (region->inner->type)
4386 case GIMPLE_OMP_FOR:
4387 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4388 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4389 + (region->inner->sched_kind
4390 == OMP_CLAUSE_SCHEDULE_RUNTIME
4391 ? 3 : region->inner->sched_kind));
4392 start_ix = (enum built_in_function)start_ix2;
4393 break;
4394 case GIMPLE_OMP_SECTIONS:
4395 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4396 break;
4397 default:
4398 gcc_unreachable ();
4402 /* By default, the value of NUM_THREADS is zero (selected at run time)
4403 and there is no conditional. */
4404 cond = NULL_TREE;
4405 val = build_int_cst (unsigned_type_node, 0);
4406 flags = build_int_cst (unsigned_type_node, 0);
4408 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4409 if (c)
4410 cond = OMP_CLAUSE_IF_EXPR (c);
4412 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4413 if (c)
4415 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4416 clause_loc = OMP_CLAUSE_LOCATION (c);
4418 else
4419 clause_loc = gimple_location (entry_stmt);
4421 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4422 if (c)
4423 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4425 /* Ensure 'val' is of the correct type. */
4426 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4428 /* If we found the clause 'if (cond)', build either
4429 (cond != 0) or (cond ? val : 1u). */
4430 if (cond)
4432 cond = gimple_boolify (cond);
4434 if (integer_zerop (val))
4435 val = fold_build2_loc (clause_loc,
4436 EQ_EXPR, unsigned_type_node, cond,
4437 build_int_cst (TREE_TYPE (cond), 0));
4438 else
4440 basic_block cond_bb, then_bb, else_bb;
4441 edge e, e_then, e_else;
4442 tree tmp_then, tmp_else, tmp_join, tmp_var;
4444 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4445 if (gimple_in_ssa_p (cfun))
4447 tmp_then = make_ssa_name (tmp_var, NULL);
4448 tmp_else = make_ssa_name (tmp_var, NULL);
4449 tmp_join = make_ssa_name (tmp_var, NULL);
4451 else
4453 tmp_then = tmp_var;
4454 tmp_else = tmp_var;
4455 tmp_join = tmp_var;
4458 e = split_block (bb, NULL);
4459 cond_bb = e->src;
4460 bb = e->dest;
4461 remove_edge (e);
4463 then_bb = create_empty_bb (cond_bb);
4464 else_bb = create_empty_bb (then_bb);
4465 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4466 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4468 stmt = gimple_build_cond_empty (cond);
4469 gsi = gsi_start_bb (cond_bb);
4470 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4472 gsi = gsi_start_bb (then_bb);
4473 stmt = gimple_build_assign (tmp_then, val);
4474 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4476 gsi = gsi_start_bb (else_bb);
4477 stmt = gimple_build_assign
4478 (tmp_else, build_int_cst (unsigned_type_node, 1));
4479 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4481 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4482 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4483 add_bb_to_loop (then_bb, cond_bb->loop_father);
4484 add_bb_to_loop (else_bb, cond_bb->loop_father);
4485 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4486 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4488 if (gimple_in_ssa_p (cfun))
4490 gimple_phi phi = create_phi_node (tmp_join, bb);
4491 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4492 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4495 val = tmp_join;
4498 gsi = gsi_start_bb (bb);
4499 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4500 false, GSI_CONTINUE_LINKING);
4503 gsi = gsi_last_bb (bb);
4504 t = gimple_omp_parallel_data_arg (entry_stmt);
4505 if (t == NULL)
4506 t1 = null_pointer_node;
4507 else
4508 t1 = build_fold_addr_expr (t);
4509 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4511 vec_alloc (args, 4 + vec_safe_length (ws_args));
4512 args->quick_push (t2);
4513 args->quick_push (t1);
4514 args->quick_push (val);
4515 if (ws_args)
4516 args->splice (*ws_args);
4517 args->quick_push (flags);
4519 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4520 builtin_decl_explicit (start_ix), args);
4522 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4523 false, GSI_CONTINUE_LINKING);
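/* An editorial sketch of the result: for #pragma omp parallel if (c)
   num_threads (n) the code inserted above amounts to

     v = c ? n : 1;
     GOMP_parallel (child_fn, &.omp_data_o, v, flags);

   and when no num_threads clause is given, v becomes (c == 0), i.e. a
   single thread if the if clause is false and the runtime default (0)
   otherwise.  */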
4526 /* Insert a call to the function named FUNC_NAME, passing the information
4527 from ENTRY_STMT, into the basic block BB. */
4529 static void
4530 expand_cilk_for_call (basic_block bb, gimple_omp_parallel entry_stmt,
4531 vec <tree, va_gc> *ws_args)
4533 tree t, t1, t2;
4534 gimple_stmt_iterator gsi;
4535 vec <tree, va_gc> *args;
4537 gcc_assert (vec_safe_length (ws_args) == 2);
4538 tree func_name = (*ws_args)[0];
4539 tree grain = (*ws_args)[1];
4541 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
4542 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
4543 gcc_assert (count != NULL_TREE);
4544 count = OMP_CLAUSE_OPERAND (count, 0);
4546 gsi = gsi_last_bb (bb);
4547 t = gimple_omp_parallel_data_arg (entry_stmt);
4548 if (t == NULL)
4549 t1 = null_pointer_node;
4550 else
4551 t1 = build_fold_addr_expr (t);
4552 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4554 vec_alloc (args, 4);
4555 args->quick_push (t2);
4556 args->quick_push (t1);
4557 args->quick_push (count);
4558 args->quick_push (grain);
4559 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
4561 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
4562 GSI_CONTINUE_LINKING);
4565 /* Build the function call to GOMP_task to actually
4566 generate the task operation. BB is the block where the code should be inserted. */
4568 static void
4569 expand_task_call (basic_block bb, gimple_omp_task entry_stmt)
4571 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4572 gimple_stmt_iterator gsi;
4573 location_t loc = gimple_location (entry_stmt);
4575 clauses = gimple_omp_task_clauses (entry_stmt);
4577 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4578 if (c)
4579 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4580 else
4581 cond = boolean_true_node;
4583 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4584 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4585 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4586 flags = build_int_cst (unsigned_type_node,
4587 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4589 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4590 if (c)
4592 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4593 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4594 build_int_cst (unsigned_type_node, 2),
4595 build_int_cst (unsigned_type_node, 0));
4596 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4598 if (depend)
4599 depend = OMP_CLAUSE_DECL (depend);
4600 else
4601 depend = build_int_cst (ptr_type_node, 0);
4603 gsi = gsi_last_bb (bb);
4604 t = gimple_omp_task_data_arg (entry_stmt);
4605 if (t == NULL)
4606 t2 = null_pointer_node;
4607 else
4608 t2 = build_fold_addr_expr_loc (loc, t);
4609 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4610 t = gimple_omp_task_copy_fn (entry_stmt);
4611 if (t == NULL)
4612 t3 = null_pointer_node;
4613 else
4614 t3 = build_fold_addr_expr_loc (loc, t);
4616 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4617 8, t1, t2, t3,
4618 gimple_omp_task_arg_size (entry_stmt),
4619 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4620 depend);
4622 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4623 false, GSI_CONTINUE_LINKING);
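/* An editorial note on the flags encoding above: bit 0 is untied,
   bit 2 (value 4) mergeable, bit 3 (value 8) depend, and bit 1
   (value 2) is added at run time from the final clause, so e.g.

     #pragma omp task untied mergeable

   passes flags = 1 + 4 = 5 to GOMP_task.  */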
4627 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4628 catch handler and return it. This prevents programs from violating the
4629 structured block semantics with throws. */
4631 static gimple_seq
4632 maybe_catch_exception (gimple_seq body)
4634 gimple g;
4635 tree decl;
4637 if (!flag_exceptions)
4638 return body;
4640 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4641 decl = lang_hooks.eh_protect_cleanup_actions ();
4642 else
4643 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4645 g = gimple_build_eh_must_not_throw (decl);
4646 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4647 GIMPLE_TRY_CATCH);
4649 return gimple_seq_alloc_with_stmt (g);
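/* An editorial sketch: the wrapper built above corresponds to

     try { BODY } catch (...) { <must-not-throw handler> }

   where the handler calls the language's cleanup action (for C++ this
   is typically a call to std::terminate) or __builtin_trap, so an
   escaping exception ends the program instead of leaving the
   structured block.  */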
4652 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
4654 static tree
4655 vec2chain (vec<tree, va_gc> *v)
4657 tree chain = NULL_TREE, t;
4658 unsigned ix;
4660 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4662 DECL_CHAIN (t) = chain;
4663 chain = t;
4666 return chain;
4670 /* Remove barriers in REGION->EXIT's block. Note that this is only
4671 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4672 is an implicit barrier, any barrier that a workshare inside the
4673 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
4674 can now be removed. */
4676 static void
4677 remove_exit_barrier (struct omp_region *region)
4679 gimple_stmt_iterator gsi;
4680 basic_block exit_bb;
4681 edge_iterator ei;
4682 edge e;
4683 gimple stmt;
4684 int any_addressable_vars = -1;
4686 exit_bb = region->exit;
4688 /* If the parallel region doesn't return, we don't have a REGION->EXIT
4689 block at all. */
4690 if (! exit_bb)
4691 return;
4693 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4694 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4695 statements that can appear in between are extremely limited -- no
4696 memory operations at all. Here we are even stricter: the only
4697 thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4698 gsi = gsi_last_bb (exit_bb);
4699 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4700 gsi_prev (&gsi);
4701 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4702 return;
4704 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4706 gsi = gsi_last_bb (e->src);
4707 if (gsi_end_p (gsi))
4708 continue;
4709 stmt = gsi_stmt (gsi);
4710 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4711 && !gimple_omp_return_nowait_p (stmt))
4713 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4714 in many cases. If there could be tasks queued, the barrier
4715 might be needed to let the tasks run before some local
4716 variable of the parallel that the task uses as shared
4717 runs out of scope. The task can be spawned either
4718 from within the current function (this would be easy to check)
4719 or from some function it calls that gets passed the address
4720 of such a variable. */
4721 if (any_addressable_vars < 0)
4723 gimple_omp_parallel parallel_stmt =
4724 as_a <gimple_omp_parallel> (last_stmt (region->entry));
4725 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4726 tree local_decls, block, decl;
4727 unsigned ix;
4729 any_addressable_vars = 0;
4730 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4731 if (TREE_ADDRESSABLE (decl))
4733 any_addressable_vars = 1;
4734 break;
4736 for (block = gimple_block (stmt);
4737 !any_addressable_vars
4738 && block
4739 && TREE_CODE (block) == BLOCK;
4740 block = BLOCK_SUPERCONTEXT (block))
4742 for (local_decls = BLOCK_VARS (block);
4743 local_decls;
4744 local_decls = DECL_CHAIN (local_decls))
4745 if (TREE_ADDRESSABLE (local_decls))
4747 any_addressable_vars = 1;
4748 break;
4750 if (block == gimple_block (parallel_stmt))
4751 break;
4754 if (!any_addressable_vars)
4755 gimple_omp_return_set_nowait (stmt);
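/* An editorial sketch: in

     #pragma omp parallel
     #pragma omp for
     for (...) ...

   the workshare's implicit barrier is immediately followed by the
   parallel's own implicit barrier, so the workshare's GIMPLE_OMP_RETURN
   is marked nowait above, provided no addressable locals might still be
   needed by queued tasks.  */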
4760 static void
4761 remove_exit_barriers (struct omp_region *region)
4763 if (region->type == GIMPLE_OMP_PARALLEL)
4764 remove_exit_barrier (region);
4766 if (region->inner)
4768 region = region->inner;
4769 remove_exit_barriers (region);
4770 while (region->next)
4772 region = region->next;
4773 remove_exit_barriers (region);
4778 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4779 calls. These can't be declared as const functions, but
4780 within one parallel body they are constant, so they can be
4781 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4782 which are declared const. Similarly for a task body, except
4783 that in an untied task omp_get_thread_num () can change at any
4784 task scheduling point. */
4786 static void
4787 optimize_omp_library_calls (gimple entry_stmt)
4789 basic_block bb;
4790 gimple_stmt_iterator gsi;
4791 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4792 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4793 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4794 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4795 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4796 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4797 OMP_CLAUSE_UNTIED) != NULL);
4799 FOR_EACH_BB_FN (bb, cfun)
4800 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4802 gimple call = gsi_stmt (gsi);
4803 tree decl;
4805 if (is_gimple_call (call)
4806 && (decl = gimple_call_fndecl (call))
4807 && DECL_EXTERNAL (decl)
4808 && TREE_PUBLIC (decl)
4809 && DECL_INITIAL (decl) == NULL)
4811 tree built_in;
4813 if (DECL_NAME (decl) == thr_num_id)
4815 /* In #pragma omp task untied omp_get_thread_num () can change
4816 during the execution of the task region. */
4817 if (untied_task)
4818 continue;
4819 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4821 else if (DECL_NAME (decl) == num_thr_id)
4822 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4823 else
4824 continue;
4826 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4827 || gimple_call_num_args (call) != 0)
4828 continue;
4830 if (flag_exceptions && !TREE_NOTHROW (decl))
4831 continue;
4833 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4834 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4835 TREE_TYPE (TREE_TYPE (built_in))))
4836 continue;
4838 gimple_call_set_fndecl (call, built_in);
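/* An editorial sketch: inside an outlined parallel body a call such as

     n = omp_get_num_threads ();

   is redirected to the const __builtin_omp_get_num_threads (), so CSE
   can reuse N across the body instead of repeating the library call.  */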
4843 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4844 regimplified. */
4846 static tree
4847 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4849 tree t = *tp;
4851 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4852 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4853 return t;
4855 if (TREE_CODE (t) == ADDR_EXPR)
4856 recompute_tree_invariant_for_addr_expr (t);
4858 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4859 return NULL_TREE;
4862 /* Prepend TO = FROM assignment before *GSI_P. */
4864 static void
4865 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4867 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4868 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4869 true, GSI_SAME_STMT);
4870 gimple stmt = gimple_build_assign (to, from);
4871 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4872 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4873 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4875 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4876 gimple_regimplify_operands (stmt, &gsi);
4880 /* Expand the OpenMP parallel or task directive starting at REGION. */
4882 static void
4883 expand_omp_taskreg (struct omp_region *region)
4885 basic_block entry_bb, exit_bb, new_bb;
4886 struct function *child_cfun;
4887 tree child_fn, block, t;
4888 gimple_stmt_iterator gsi;
4889 gimple entry_stmt, stmt;
4890 edge e;
4891 vec<tree, va_gc> *ws_args;
4893 entry_stmt = last_stmt (region->entry);
4894 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4895 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4897 entry_bb = region->entry;
4898 exit_bb = region->exit;
4900 bool is_cilk_for
4901 = (flag_cilkplus
4902 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
4903 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
4904 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
4906 if (is_cilk_for)
4907 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
4908 and the inner statement contains the name of the built-in function
4909 and grain. */
4910 ws_args = region->inner->ws_args;
4911 else if (is_combined_parallel (region))
4912 ws_args = region->ws_args;
4913 else
4914 ws_args = NULL;
4916 if (child_cfun->cfg)
4918 /* Due to inlining, it may happen that we have already outlined
4919 the region, in which case all we need to do is make the
4920 sub-graph unreachable and emit the parallel call. */
4921 edge entry_succ_e, exit_succ_e;
4923 entry_succ_e = single_succ_edge (entry_bb);
4925 gsi = gsi_last_bb (entry_bb);
4926 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4927 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4928 gsi_remove (&gsi, true);
4930 new_bb = entry_bb;
4931 if (exit_bb)
4933 exit_succ_e = single_succ_edge (exit_bb);
4934 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4936 remove_edge_and_dominated_blocks (entry_succ_e);
4938 else
4940 unsigned srcidx, dstidx, num;
4942 /* If the parallel region needs data sent from the parent
4943 function, then the very first statement (except possible
4944 tree profile counter updates) of the parallel body
4945 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4946 &.OMP_DATA_O is passed as an argument to the child function,
4947 we need to replace it with the argument as seen by the child
4948 function.
4950 In most cases, this will end up being the identity assignment
4951 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4952 a function call that has been inlined, the original PARM_DECL
4953 .OMP_DATA_I may have been converted into a different local
4954 variable, in which case we need to keep the assignment. */
4955 if (gimple_omp_taskreg_data_arg (entry_stmt))
4957 basic_block entry_succ_bb = single_succ (entry_bb);
4958 tree arg, narg;
4959 gimple parcopy_stmt = NULL;
4961 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4963 gimple stmt;
4965 gcc_assert (!gsi_end_p (gsi));
4966 stmt = gsi_stmt (gsi);
4967 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4968 continue;
4970 if (gimple_num_ops (stmt) == 2)
4972 tree arg = gimple_assign_rhs1 (stmt);
4974 /* We ignore the subcode because we're
4975 effectively doing a STRIP_NOPS. */
4977 if (TREE_CODE (arg) == ADDR_EXPR
4978 && TREE_OPERAND (arg, 0)
4979 == gimple_omp_taskreg_data_arg (entry_stmt))
4981 parcopy_stmt = stmt;
4982 break;
4987 gcc_assert (parcopy_stmt != NULL);
4988 arg = DECL_ARGUMENTS (child_fn);
4990 if (!gimple_in_ssa_p (cfun))
4992 if (gimple_assign_lhs (parcopy_stmt) == arg)
4993 gsi_remove (&gsi, true);
4994 else
4996 /* ?? Is setting the subcode really necessary ?? */
4997 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4998 gimple_assign_set_rhs1 (parcopy_stmt, arg);
5001 else
5003 /* If we are in ssa form, we must load the value from the default
5004 definition of the argument. That definition should not exist yet,
5005 since the argument is not used uninitialized. */
5006 gcc_assert (ssa_default_def (cfun, arg) == NULL);
5007 narg = make_ssa_name (arg, gimple_build_nop ());
5008 set_ssa_default_def (cfun, arg, narg);
5009 /* ?? Is setting the subcode really necessary ?? */
5010 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
5011 gimple_assign_set_rhs1 (parcopy_stmt, narg);
5012 update_stmt (parcopy_stmt);
5016 /* Declare local variables needed in CHILD_CFUN. */
5017 block = DECL_INITIAL (child_fn);
5018 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
5019 /* The gimplifier could record temporaries in the parallel/task block
5020 rather than in the containing function's local_decls chain,
5021 which would mean cgraph missed finalizing them. Do it now. */
5022 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
5023 if (TREE_CODE (t) == VAR_DECL
5024 && TREE_STATIC (t)
5025 && !DECL_EXTERNAL (t))
5026 varpool_node::finalize_decl (t);
5027 DECL_SAVED_TREE (child_fn) = NULL;
5028 /* We'll create a CFG for child_fn, so no gimple body is needed. */
5029 gimple_set_body (child_fn, NULL);
5030 TREE_USED (block) = 1;
5032 /* Reset DECL_CONTEXT on function arguments. */
5033 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
5034 DECL_CONTEXT (t) = child_fn;
5036 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
5037 so that it can be moved to the child function. */
5038 gsi = gsi_last_bb (entry_bb);
5039 stmt = gsi_stmt (gsi);
5040 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
5041 || gimple_code (stmt) == GIMPLE_OMP_TASK));
5042 gsi_remove (&gsi, true);
5043 e = split_block (entry_bb, stmt);
5044 entry_bb = e->dest;
5045 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5047 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5048 if (exit_bb)
5050 gsi = gsi_last_bb (exit_bb);
5051 gcc_assert (!gsi_end_p (gsi)
5052 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5053 stmt = gimple_build_return (NULL);
5054 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5055 gsi_remove (&gsi, true);
5058 /* Move the parallel region into CHILD_CFUN. */
5060 if (gimple_in_ssa_p (cfun))
5062 init_tree_ssa (child_cfun);
5063 init_ssa_operands (child_cfun);
5064 child_cfun->gimple_df->in_ssa_p = true;
5065 block = NULL_TREE;
5067 else
5068 block = gimple_block (entry_stmt);
5070 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
5071 if (exit_bb)
5072 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
5073 /* When the OMP expansion process cannot guarantee an up-to-date
5074 loop tree, arrange for the child function to fix up loops. */
5075 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5076 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
5078 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5079 num = vec_safe_length (child_cfun->local_decls);
5080 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
5082 t = (*child_cfun->local_decls)[srcidx];
5083 if (DECL_CONTEXT (t) == cfun->decl)
5084 continue;
5085 if (srcidx != dstidx)
5086 (*child_cfun->local_decls)[dstidx] = t;
5087 dstidx++;
5089 if (dstidx != num)
5090 vec_safe_truncate (child_cfun->local_decls, dstidx);
5092 /* Inform the callgraph about the new function. */
5093 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
5094 cgraph_node::add_new_function (child_fn, true);
5096 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5097 fixed in a following pass. */
5098 push_cfun (child_cfun);
5099 if (optimize)
5100 optimize_omp_library_calls (entry_stmt);
5101 cgraph_edge::rebuild_edges ();
5103 /* Some EH regions might become dead, see PR34608. If
5104 pass_cleanup_cfg isn't the first pass to happen with the
5105 new child, these dead EH edges might cause problems.
5106 Clean them up now. */
5107 if (flag_exceptions)
5109 basic_block bb;
5110 bool changed = false;
5112 FOR_EACH_BB_FN (bb, cfun)
5113 changed |= gimple_purge_dead_eh_edges (bb);
5114 if (changed)
5115 cleanup_tree_cfg ();
5117 if (gimple_in_ssa_p (cfun))
5118 update_ssa (TODO_update_ssa);
5119 pop_cfun ();
5122 /* Emit a library call to launch the children threads. */
5123 if (is_cilk_for)
5124 expand_cilk_for_call (new_bb,
5125 as_a <gimple_omp_parallel> (entry_stmt), ws_args);
5126 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5127 expand_parallel_call (region, new_bb,
5128 as_a <gimple_omp_parallel> (entry_stmt), ws_args);
5129 else
5130 expand_task_call (new_bb, as_a <gimple_omp_task> (entry_stmt));
5131 if (gimple_in_ssa_p (cfun))
5132 update_ssa (TODO_update_ssa_only_virtuals);
5136 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5137 of the combined collapse > 1 loop constructs, generate code like:
5138 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5139 if (cond3 is <)
5140 adj = STEP3 - 1;
5141 else
5142 adj = STEP3 + 1;
5143 count3 = (adj + N32 - N31) / STEP3;
5144 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5145 if (cond2 is <)
5146 adj = STEP2 - 1;
5147 else
5148 adj = STEP2 + 1;
5149 count2 = (adj + N22 - N21) / STEP2;
5150 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5151 if (cond1 is <)
5152 adj = STEP1 - 1;
5153 else
5154 adj = STEP1 + 1;
5155 count1 = (adj + N12 - N11) / STEP1;
5156 count = count1 * count2 * count3;
5157 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5158 count = 0;
5159 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5160 of the combined loop constructs, just initialize COUNTS array
5161 from the _looptemp_ clauses. */
5163 /* NOTE: It *could* be better to moosh all of the BBs together,
5164 creating one larger BB with all the computation and the unexpected
5165 jump at the end. I.e.
5167 bool zero3, zero2, zero1, zero;
5169 zero3 = N32 c3 N31;
5170 count3 = (N32 - N31) /[cl] STEP3;
5171 zero2 = N22 c2 N21;
5172 count2 = (N22 - N21) /[cl] STEP2;
5173 zero1 = N12 c1 N11;
5174 count1 = (N12 - N11) /[cl] STEP1;
5175 zero = zero3 || zero2 || zero1;
5176 count = count1 * count2 * count3;
5177 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5179 After all, we expect zero to be false, and thus we expect to have to
5180 evaluate all of the comparison expressions, so short-circuiting
5181 oughtn't be a win. Since the condition isn't protecting a
5182 denominator, we're not concerned about divide-by-zero, so we can
5183 fully evaluate count even if a numerator turned out to be wrong.
5185 It seems like putting this all together would create much better
5186 scheduling opportunities, and less pressure on the chip's branch
5187 predictor. */
5189 static void
5190 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5191 basic_block &entry_bb, tree *counts,
5192 basic_block &zero_iter_bb, int &first_zero_iter,
5193 basic_block &l2_dom_bb)
5195 tree t, type = TREE_TYPE (fd->loop.v);
5196 edge e, ne;
5197 int i;
5199 /* Collapsed loops need work for expansion into SSA form. */
5200 gcc_assert (!gimple_in_ssa_p (cfun));
5202 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5203 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5205 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
5206 isn't supposed to be handled, as the inner loop doesn't
5207 use it. */
5208 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5209 OMP_CLAUSE__LOOPTEMP_);
5210 gcc_assert (innerc);
5211 for (i = 0; i < fd->collapse; i++)
5213 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5214 OMP_CLAUSE__LOOPTEMP_);
5215 gcc_assert (innerc);
5216 if (i)
5217 counts[i] = OMP_CLAUSE_DECL (innerc);
5218 else
5219 counts[0] = NULL_TREE;
5221 return;
5224 for (i = 0; i < fd->collapse; i++)
5226 tree itype = TREE_TYPE (fd->loops[i].v);
5228 if (SSA_VAR_P (fd->loop.n2)
5229 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5230 fold_convert (itype, fd->loops[i].n1),
5231 fold_convert (itype, fd->loops[i].n2)))
5232 == NULL_TREE || !integer_onep (t)))
5234 gimple_cond cond_stmt;
5235 tree n1, n2;
5236 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5237 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5238 true, GSI_SAME_STMT);
5239 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5240 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5241 true, GSI_SAME_STMT);
5242 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5243 NULL_TREE, NULL_TREE);
5244 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
5245 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
5246 expand_omp_regimplify_p, NULL, NULL)
5247 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
5248 expand_omp_regimplify_p, NULL, NULL))
5250 *gsi = gsi_for_stmt (cond_stmt);
5251 gimple_regimplify_operands (cond_stmt, gsi);
5253 e = split_block (entry_bb, cond_stmt);
5254 if (zero_iter_bb == NULL)
5256 gimple_assign assign_stmt;
5257 first_zero_iter = i;
5258 zero_iter_bb = create_empty_bb (entry_bb);
5259 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5260 *gsi = gsi_after_labels (zero_iter_bb);
5261 assign_stmt = gimple_build_assign (fd->loop.n2,
5262 build_zero_cst (type));
5263 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
5264 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5265 entry_bb);
5267 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5268 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5269 e->flags = EDGE_TRUE_VALUE;
5270 e->probability = REG_BR_PROB_BASE - ne->probability;
5271 if (l2_dom_bb == NULL)
5272 l2_dom_bb = entry_bb;
5273 entry_bb = e->dest;
5274 *gsi = gsi_last_bb (entry_bb);
5277 if (POINTER_TYPE_P (itype))
5278 itype = signed_type_for (itype);
5279 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5280 ? -1 : 1));
5281 t = fold_build2 (PLUS_EXPR, itype,
5282 fold_convert (itype, fd->loops[i].step), t);
5283 t = fold_build2 (PLUS_EXPR, itype, t,
5284 fold_convert (itype, fd->loops[i].n2));
5285 t = fold_build2 (MINUS_EXPR, itype, t,
5286 fold_convert (itype, fd->loops[i].n1));
5287 /* ?? We could probably use CEIL_DIV_EXPR instead of
5288 TRUNC_DIV_EXPR and adjusting by hand. Unless we can't
5289 generate the same code in the end because generically we
5290 don't know that the values involved must be negative for
5291 GT?? */
5292 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5293 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5294 fold_build1 (NEGATE_EXPR, itype, t),
5295 fold_build1 (NEGATE_EXPR, itype,
5296 fold_convert (itype,
5297 fd->loops[i].step)));
5298 else
5299 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5300 fold_convert (itype, fd->loops[i].step));
5301 t = fold_convert (type, t);
5302 if (TREE_CODE (t) == INTEGER_CST)
5303 counts[i] = t;
5304 else
5306 counts[i] = create_tmp_reg (type, ".count");
5307 expand_omp_build_assign (gsi, counts[i], t);
5309 if (SSA_VAR_P (fd->loop.n2))
5311 if (i == 0)
5312 t = counts[0];
5313 else
5314 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5315 expand_omp_build_assign (gsi, fd->loop.n2, t);
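/* An editorial worked example of the computation above: for
   "for (i = 0; i < 10; i += 3)" with cond_code LT_EXPR,

     adj   = STEP - 1		       = 2
     count = (adj + N2 - N1) / STEP    = (2 + 10 - 0) / 3 = 4

   matching the four iterations i = 0, 3, 6, 9.  */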
5321 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5322 T = V;
5323 V3 = N31 + (T % count3) * STEP3;
5324 T = T / count3;
5325 V2 = N21 + (T % count2) * STEP2;
5326 T = T / count2;
5327 V1 = N11 + T * STEP1;
5328 if this loop doesn't have an inner loop construct combined with it.
5329 If it does have an inner loop construct combined with it and the
5330 iteration count isn't known constant, store values from counts array
5331 into its _looptemp_ temporaries instead. */
5333 static void
5334 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5335 tree *counts, gimple inner_stmt, tree startvar)
5337 int i;
5338 if (gimple_omp_for_combined_p (fd->for_stmt))
5340 /* If fd->loop.n2 is constant, then no propagation of the counts
5341 is needed; they are constant. */
5342 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5343 return;
5345 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5346 ? gimple_omp_parallel_clauses (inner_stmt)
5347 : gimple_omp_for_clauses (inner_stmt);
5348 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
5349 isn't supposed to be handled, as the inner loop doesn't
5350 use it. */
5351 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5352 gcc_assert (innerc);
5353 for (i = 0; i < fd->collapse; i++)
5355 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5356 OMP_CLAUSE__LOOPTEMP_);
5357 gcc_assert (innerc);
5358 if (i)
5360 tree tem = OMP_CLAUSE_DECL (innerc);
5361 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5362 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5363 false, GSI_CONTINUE_LINKING);
5364 gimple_assign stmt = gimple_build_assign (tem, t);
5365 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5368 return;
5371 tree type = TREE_TYPE (fd->loop.v);
5372 tree tem = create_tmp_reg (type, ".tem");
5373 gimple_assign stmt = gimple_build_assign (tem, startvar);
5374 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5376 for (i = fd->collapse - 1; i >= 0; i--)
5378 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5379 itype = vtype;
5380 if (POINTER_TYPE_P (vtype))
5381 itype = signed_type_for (vtype);
5382 if (i != 0)
5383 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5384 else
5385 t = tem;
5386 t = fold_convert (itype, t);
5387 t = fold_build2 (MULT_EXPR, itype, t,
5388 fold_convert (itype, fd->loops[i].step));
5389 if (POINTER_TYPE_P (vtype))
5390 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5391 else
5392 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5393 t = force_gimple_operand_gsi (gsi, t,
5394 DECL_P (fd->loops[i].v)
5395 && TREE_ADDRESSABLE (fd->loops[i].v),
5396 NULL_TREE, false,
5397 GSI_CONTINUE_LINKING);
5398 stmt = gimple_build_assign (fd->loops[i].v, t);
5399 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5400 if (i != 0)
5402 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5403 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5404 false, GSI_CONTINUE_LINKING);
5405 stmt = gimple_build_assign (tem, t);
5406 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
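/* An editorial worked instance of the unflattening above: with
   collapse(2), count2 == 5 and logical iteration T == 13,

     V2 = N21 + (13 % 5) * STEP2;	// inner index 3
     T  = 13 / 5;			// == 2
     V1 = N11 + 2 * STEP1;		// outer index 2  */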
5412 /* Helper function for expand_omp_for_*. Generate code like:
5413 L10:
5414 V3 += STEP3;
5415 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5416 L11:
5417 V3 = N31;
5418 V2 += STEP2;
5419 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5420 L12:
5421 V2 = N21;
5422 V1 += STEP1;
5423 goto BODY_BB; */
5425 static basic_block
5426 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5427 basic_block body_bb)
5429 basic_block last_bb, bb, collapse_bb = NULL;
5430 int i;
5431 gimple_stmt_iterator gsi;
5432 edge e;
5433 tree t;
5434 gimple stmt;
5436 last_bb = cont_bb;
5437 for (i = fd->collapse - 1; i >= 0; i--)
5439 tree vtype = TREE_TYPE (fd->loops[i].v);
5441 bb = create_empty_bb (last_bb);
5442 add_bb_to_loop (bb, last_bb->loop_father);
5443 gsi = gsi_start_bb (bb);
5445 if (i < fd->collapse - 1)
5447 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5448 e->probability = REG_BR_PROB_BASE / 8;
5450 t = fd->loops[i + 1].n1;
5451 t = force_gimple_operand_gsi (&gsi, t,
5452 DECL_P (fd->loops[i + 1].v)
5453 && TREE_ADDRESSABLE (fd->loops[i
5454 + 1].v),
5455 NULL_TREE, false,
5456 GSI_CONTINUE_LINKING);
5457 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5458 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5460 else
5461 collapse_bb = bb;
5463 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5465 if (POINTER_TYPE_P (vtype))
5466 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5467 else
5468 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5469 t = force_gimple_operand_gsi (&gsi, t,
5470 DECL_P (fd->loops[i].v)
5471 && TREE_ADDRESSABLE (fd->loops[i].v),
5472 NULL_TREE, false, GSI_CONTINUE_LINKING);
5473 stmt = gimple_build_assign (fd->loops[i].v, t);
5474 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5476 if (i > 0)
5478 t = fd->loops[i].n2;
5479 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5480 false, GSI_CONTINUE_LINKING);
5481 tree v = fd->loops[i].v;
5482 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5483 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5484 false, GSI_CONTINUE_LINKING);
5485 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5486 stmt = gimple_build_cond_empty (t);
5487 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5488 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5489 e->probability = REG_BR_PROB_BASE * 7 / 8;
5491 else
5492 make_edge (bb, body_bb, EDGE_FALLTHRU);
5493 last_bb = bb;
5496 return collapse_bb;
5500 /* A subroutine of expand_omp_for. Generate code for a parallel
5501 loop with any schedule. Given parameters:
5503 for (V = N1; V cond N2; V += STEP) BODY;
5505 where COND is "<" or ">", we generate pseudocode
5507 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5508 if (more) goto L0; else goto L3;
5510 V = istart0;
5511 iend = iend0;
5513 BODY;
5514 V += STEP;
5515 if (V cond iend) goto L1; else goto L2;
5517 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5520 If this is a combined omp parallel loop, instead of the call to
5521 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5522 If this is a gimple_omp_for_combined_p loop, then instead of assigning
5523 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5524 inner GIMPLE_OMP_FOR and V += STEP; and
5525 if (V cond iend) goto L1; else goto L2; are removed.
5527 For collapsed loops, given parameters:
5528 collapse(3)
5529 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5530 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5531 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5532 BODY;
5534 we generate pseudocode
5536 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5537 if (cond3 is <)
5538 adj = STEP3 - 1;
5539 else
5540 adj = STEP3 + 1;
5541 count3 = (adj + N32 - N31) / STEP3;
5542 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5543 if (cond2 is <)
5544 adj = STEP2 - 1;
5545 else
5546 adj = STEP2 + 1;
5547 count2 = (adj + N22 - N21) / STEP2;
5548 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5549 if (cond1 is <)
5550 adj = STEP1 - 1;
5551 else
5552 adj = STEP1 + 1;
5553 count1 = (adj + N12 - N11) / STEP1;
5554 count = count1 * count2 * count3;
5555 goto Z1;
5557 count = 0;
5559 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5560 if (more) goto L0; else goto L3;
5562 V = istart0;
5563 T = V;
5564 V3 = N31 + (T % count3) * STEP3;
5565 T = T / count3;
5566 V2 = N21 + (T % count2) * STEP2;
5567 T = T / count2;
5568 V1 = N11 + T * STEP1;
5569 iend = iend0;
5571 BODY;
5572 V += 1;
5573 if (V < iend) goto L10; else goto L2;
5574 L10:
5575 V3 += STEP3;
5576 if (V3 cond3 N32) goto L1; else goto L11;
5577 L11:
5578 V3 = N31;
5579 V2 += STEP2;
5580 if (V2 cond2 N22) goto L1; else goto L12;
5581 L12:
5582 V2 = N21;
5583 V1 += STEP1;
5584 goto L1;
5586 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5591 static void
5592 expand_omp_for_generic (struct omp_region *region,
5593 struct omp_for_data *fd,
5594 enum built_in_function start_fn,
5595 enum built_in_function next_fn,
5596 gimple inner_stmt)
5598 tree type, istart0, iend0, iend;
5599 tree t, vmain, vback, bias = NULL_TREE;
5600 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5601 basic_block l2_bb = NULL, l3_bb = NULL;
5602 gimple_stmt_iterator gsi;
5603 gimple_assign assign_stmt;
5604 bool in_combined_parallel = is_combined_parallel (region);
5605 bool broken_loop = region->cont == NULL;
5606 edge e, ne;
5607 tree *counts = NULL;
5608 int i;
5610 gcc_assert (!broken_loop || !in_combined_parallel);
5611 gcc_assert (fd->iter_type == long_integer_type_node
5612 || !in_combined_parallel);
5614 type = TREE_TYPE (fd->loop.v);
5615 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5616 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5617 TREE_ADDRESSABLE (istart0) = 1;
5618 TREE_ADDRESSABLE (iend0) = 1;
5620 /* See if we need to bias by LLONG_MIN. */
5621 if (fd->iter_type == long_long_unsigned_type_node
5622 && TREE_CODE (type) == INTEGER_TYPE
5623 && !TYPE_UNSIGNED (type))
5625 tree n1, n2;
5627 if (fd->loop.cond_code == LT_EXPR)
5629 n1 = fd->loop.n1;
5630 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5632 else
5634 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5635 n2 = fd->loop.n1;
5637 if (TREE_CODE (n1) != INTEGER_CST
5638 || TREE_CODE (n2) != INTEGER_CST
5639 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5640 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5643 entry_bb = region->entry;
5644 cont_bb = region->cont;
5645 collapse_bb = NULL;
5646 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5647 gcc_assert (broken_loop
5648 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5649 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5650 l1_bb = single_succ (l0_bb);
5651 if (!broken_loop)
5653 l2_bb = create_empty_bb (cont_bb);
5654 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5655 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5657 else
5658 l2_bb = NULL;
5659 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5660 exit_bb = region->exit;
5662 gsi = gsi_last_bb (entry_bb);
5664 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5665 if (fd->collapse > 1)
5667 int first_zero_iter = -1;
5668 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5670 counts = XALLOCAVEC (tree, fd->collapse);
5671 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5672 zero_iter_bb, first_zero_iter,
5673 l2_dom_bb);
5675 if (zero_iter_bb)
5677 /* Some counts[i] vars might be uninitialized if
5678 some loop has zero iterations. But the body shouldn't
5679 be executed in that case, so just avoid uninit warnings. */
5680 for (i = first_zero_iter; i < fd->collapse; i++)
5681 if (SSA_VAR_P (counts[i]))
5682 TREE_NO_WARNING (counts[i]) = 1;
5683 gsi_prev (&gsi);
5684 e = split_block (entry_bb, gsi_stmt (gsi));
5685 entry_bb = e->dest;
5686 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5687 gsi = gsi_last_bb (entry_bb);
5688 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5689 get_immediate_dominator (CDI_DOMINATORS,
5690 zero_iter_bb));
5693 if (in_combined_parallel)
5695 /* In a combined parallel loop, emit a call to
5696 GOMP_loop_foo_next. */
5697 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5698 build_fold_addr_expr (istart0),
5699 build_fold_addr_expr (iend0));
5701 else
5703 tree t0, t1, t2, t3, t4;
5704 /* If this is not a combined parallel loop, emit a call to
5705 GOMP_loop_foo_start in ENTRY_BB. */
5706 t4 = build_fold_addr_expr (iend0);
5707 t3 = build_fold_addr_expr (istart0);
5708 t2 = fold_convert (fd->iter_type, fd->loop.step);
5709 t1 = fd->loop.n2;
5710 t0 = fd->loop.n1;
5711 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5713 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5714 OMP_CLAUSE__LOOPTEMP_);
5715 gcc_assert (innerc);
5716 t0 = OMP_CLAUSE_DECL (innerc);
5717 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5718 OMP_CLAUSE__LOOPTEMP_);
5719 gcc_assert (innerc);
5720 t1 = OMP_CLAUSE_DECL (innerc);
5722 if (POINTER_TYPE_P (TREE_TYPE (t0))
5723 && TYPE_PRECISION (TREE_TYPE (t0))
5724 != TYPE_PRECISION (fd->iter_type))
5726 /* Avoid casting pointers to an integer of a different size. */
5727 tree itype = signed_type_for (type);
5728 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5729 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5731 else
5733 t1 = fold_convert (fd->iter_type, t1);
5734 t0 = fold_convert (fd->iter_type, t0);
5736 if (bias)
5738 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5739 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5741 if (fd->iter_type == long_integer_type_node)
5743 if (fd->chunk_size)
5745 t = fold_convert (fd->iter_type, fd->chunk_size);
5746 t = build_call_expr (builtin_decl_explicit (start_fn),
5747 6, t0, t1, t2, t, t3, t4);
5749 else
5750 t = build_call_expr (builtin_decl_explicit (start_fn),
5751 5, t0, t1, t2, t3, t4);
5753 else
5755 tree t5;
5756 tree c_bool_type;
5757 tree bfn_decl;
5759 /* The GOMP_loop_ull_*start functions have an additional boolean
5760 argument, true for < loops and false for > loops.
5761 In Fortran, the C bool type can be different from
5762 boolean_type_node. */
5763 bfn_decl = builtin_decl_explicit (start_fn);
5764 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5765 t5 = build_int_cst (c_bool_type,
5766 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5767 if (fd->chunk_size)
5769 tree bfn_decl = builtin_decl_explicit (start_fn);
5770 t = fold_convert (fd->iter_type, fd->chunk_size);
5771 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5773 else
5774 t = build_call_expr (builtin_decl_explicit (start_fn),
5775 6, t5, t0, t1, t2, t3, t4);
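/* For reference (argument names illustrative, inferred from the code
   above rather than stated in it), the start calls built here have the
   shapes
     GOMP_loop_<sched>_start (n1, n2, step[, chunk], &istart0, &iend0)
     GOMP_loop_ull_<sched>_start (up, n1, n2, step[, chunk],
                                  &istart0, &iend0)
   matching the 5/6- and 6/7-argument build_call_expr uses, with the
   leading boolean of the ull flavor selecting < versus > loops.  */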
5778 if (TREE_TYPE (t) != boolean_type_node)
5779 t = fold_build2 (NE_EXPR, boolean_type_node,
5780 t, build_int_cst (TREE_TYPE (t), 0));
5781 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5782 true, GSI_SAME_STMT);
5783 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5785 /* Remove the GIMPLE_OMP_FOR statement. */
5786 gsi_remove (&gsi, true);
5788 /* Iteration setup for sequential loop goes in L0_BB. */
5789 tree startvar = fd->loop.v;
5790 tree endvar = NULL_TREE;
5792 if (gimple_omp_for_combined_p (fd->for_stmt))
5794 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5795 && gimple_omp_for_kind (inner_stmt)
5796 == GF_OMP_FOR_KIND_SIMD);
5797 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5798 OMP_CLAUSE__LOOPTEMP_);
5799 gcc_assert (innerc);
5800 startvar = OMP_CLAUSE_DECL (innerc);
5801 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5802 OMP_CLAUSE__LOOPTEMP_);
5803 gcc_assert (innerc);
5804 endvar = OMP_CLAUSE_DECL (innerc);
5807 gsi = gsi_start_bb (l0_bb);
5808 t = istart0;
5809 if (bias)
5810 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5811 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5812 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5813 t = fold_convert (TREE_TYPE (startvar), t);
5814 t = force_gimple_operand_gsi (&gsi, t,
5815 DECL_P (startvar)
5816 && TREE_ADDRESSABLE (startvar),
5817 NULL_TREE, false, GSI_CONTINUE_LINKING);
5818 assign_stmt = gimple_build_assign (startvar, t);
5819 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5821 t = iend0;
5822 if (bias)
5823 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5824 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5825 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5826 t = fold_convert (TREE_TYPE (startvar), t);
5827 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5828 false, GSI_CONTINUE_LINKING);
5829 if (endvar)
5831 assign_stmt = gimple_build_assign (endvar, iend);
5832 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5833 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5834 assign_stmt = gimple_build_assign (fd->loop.v, iend);
5835 else
5836 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, iend,
5837 NULL_TREE);
5838 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5840 if (fd->collapse > 1)
5841 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5843 if (!broken_loop)
5845 /* Code to control the increment and predicate for the sequential
5846 loop goes in the CONT_BB. */
5847 gsi = gsi_last_bb (cont_bb);
5848 gimple_omp_continue cont_stmt =
5849 as_a <gimple_omp_continue> (gsi_stmt (gsi));
5850 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
5851 vmain = gimple_omp_continue_control_use (cont_stmt);
5852 vback = gimple_omp_continue_control_def (cont_stmt);
5854 if (!gimple_omp_for_combined_p (fd->for_stmt))
5856 if (POINTER_TYPE_P (type))
5857 t = fold_build_pointer_plus (vmain, fd->loop.step);
5858 else
5859 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5860 t = force_gimple_operand_gsi (&gsi, t,
5861 DECL_P (vback)
5862 && TREE_ADDRESSABLE (vback),
5863 NULL_TREE, true, GSI_SAME_STMT);
5864 assign_stmt = gimple_build_assign (vback, t);
5865 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
5867 t = build2 (fd->loop.cond_code, boolean_type_node,
5868 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5869 iend);
5870 gimple_cond cond_stmt = gimple_build_cond_empty (t);
5871 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
5874 /* Remove GIMPLE_OMP_CONTINUE. */
5875 gsi_remove (&gsi, true);
5877 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5878 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5880 /* Emit code to get the next parallel iteration in L2_BB. */
5881 gsi = gsi_start_bb (l2_bb);
5883 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5884 build_fold_addr_expr (istart0),
5885 build_fold_addr_expr (iend0));
5886 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5887 false, GSI_CONTINUE_LINKING);
5888 if (TREE_TYPE (t) != boolean_type_node)
5889 t = fold_build2 (NE_EXPR, boolean_type_node,
5890 t, build_int_cst (TREE_TYPE (t), 0));
5891 gimple_cond cond_stmt = gimple_build_cond_empty (t);
5892 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
5895 /* Add the loop cleanup function. */
5896 gsi = gsi_last_bb (exit_bb);
5897 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5898 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5899 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5900 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5901 else
5902 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5903 gimple_call call_stmt = gimple_build_call (t, 0);
5904 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5905 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5906 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
5907 gsi_remove (&gsi, true);
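/* Illustration (a sketch, not in the original): the cancellable
   variant behaves roughly as
     cancelled = GOMP_loop_end_cancel ();
   with the boolean result wired to the GIMPLE_OMP_RETURN lhs so a
   cancellation branch can test it, while the nowait variant skips the
   implied barrier altogether.  */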
5909 /* Connect the new blocks. */
5910 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5911 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5913 if (!broken_loop)
5915 gimple_seq phis;
5917 e = find_edge (cont_bb, l3_bb);
5918 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5920 phis = phi_nodes (l3_bb);
5921 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5923 gimple phi = gsi_stmt (gsi);
5924 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5925 PHI_ARG_DEF_FROM_EDGE (phi, e));
5927 remove_edge (e);
5929 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5930 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5931 e = find_edge (cont_bb, l1_bb);
5932 if (gimple_omp_for_combined_p (fd->for_stmt))
5934 remove_edge (e);
5935 e = NULL;
5937 else if (fd->collapse > 1)
5939 remove_edge (e);
5940 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5942 else
5943 e->flags = EDGE_TRUE_VALUE;
5944 if (e)
5946 e->probability = REG_BR_PROB_BASE * 7 / 8;
5947 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5949 else
5951 e = find_edge (cont_bb, l2_bb);
5952 e->flags = EDGE_FALLTHRU;
5954 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5956 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5957 recompute_dominator (CDI_DOMINATORS, l2_bb));
5958 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5959 recompute_dominator (CDI_DOMINATORS, l3_bb));
5960 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5961 recompute_dominator (CDI_DOMINATORS, l0_bb));
5962 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5963 recompute_dominator (CDI_DOMINATORS, l1_bb));
5965 struct loop *outer_loop = alloc_loop ();
5966 outer_loop->header = l0_bb;
5967 outer_loop->latch = l2_bb;
5968 add_loop (outer_loop, l0_bb->loop_father);
5970 if (!gimple_omp_for_combined_p (fd->for_stmt))
5972 struct loop *loop = alloc_loop ();
5973 loop->header = l1_bb;
5974 /* The loop may have multiple latches. */
5975 add_loop (loop, outer_loop);
5981 /* A subroutine of expand_omp_for. Generate code for a parallel
5982 loop with static schedule and no specified chunk size. Given
5983 parameters:
5985 for (V = N1; V cond N2; V += STEP) BODY;
5987 where COND is "<" or ">", we generate pseudocode
5989 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5990 if (cond is <)
5991 adj = STEP - 1;
5992 else
5993 adj = STEP + 1;
5994 if ((__typeof (V)) -1 > 0 && cond is >)
5995 n = -(adj + N2 - N1) / -STEP;
5996 else
5997 n = (adj + N2 - N1) / STEP;
5998 q = n / nthreads;
5999 tt = n % nthreads;
6000 if (threadid < tt) goto L3; else goto L4;
6001 L3:
6002 tt = 0;
6003 q = q + 1;
6004 L4:
6005 s0 = q * threadid + tt;
6006 e0 = s0 + q;
6007 V = s0 * STEP + N1;
6008 if (s0 >= e0) goto L2; else goto L0;
6009 L0:
6010 e = e0 * STEP + N1;
6011 L1:
6012 BODY;
6013 V += STEP;
6014 if (V cond e) goto L1;
6015 L2:
6016 */
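/* A worked example of the partitioning above (illustrative numbers):
   with n = 10 and nthreads = 4, q = 10 / 4 = 2 and tt = 10 % 4 = 2,
   so threads 0 and 1 (threadid < tt) execute q + 1 = 3 iterations
   each while threads 2 and 3 execute 2, i.e. the contiguous ranges
   [0,3), [3,6), [6,8) and [8,10).  */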
6018 static void
6019 expand_omp_for_static_nochunk (struct omp_region *region,
6020 struct omp_for_data *fd,
6021 gimple inner_stmt)
6023 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
6024 tree type, itype, vmain, vback;
6025 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
6026 basic_block body_bb, cont_bb, collapse_bb = NULL;
6027 basic_block fin_bb;
6028 gimple_stmt_iterator gsi;
6029 edge ep;
6030 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6031 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6032 bool broken_loop = region->cont == NULL;
6033 tree *counts = NULL;
6034 tree n1, n2, step;
6036 itype = type = TREE_TYPE (fd->loop.v);
6037 if (POINTER_TYPE_P (type))
6038 itype = signed_type_for (type);
6040 entry_bb = region->entry;
6041 cont_bb = region->cont;
6042 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6043 fin_bb = BRANCH_EDGE (entry_bb)->dest;
6044 gcc_assert (broken_loop
6045 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
6046 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6047 body_bb = single_succ (seq_start_bb);
6048 if (!broken_loop)
6050 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6051 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6053 exit_bb = region->exit;
6055 /* Iteration space partitioning goes in ENTRY_BB. */
6056 gsi = gsi_last_bb (entry_bb);
6057 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6059 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6061 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6062 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6065 if (fd->collapse > 1)
6067 int first_zero_iter = -1;
6068 basic_block l2_dom_bb = NULL;
6070 counts = XALLOCAVEC (tree, fd->collapse);
6071 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6072 fin_bb, first_zero_iter,
6073 l2_dom_bb);
6074 t = NULL_TREE;
6076 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6077 t = integer_one_node;
6078 else
6079 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6080 fold_convert (type, fd->loop.n1),
6081 fold_convert (type, fd->loop.n2));
6082 if (fd->collapse == 1
6083 && TYPE_UNSIGNED (type)
6084 && (t == NULL_TREE || !integer_onep (t)))
6086 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6087 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6088 true, GSI_SAME_STMT);
6089 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6090 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6091 true, GSI_SAME_STMT);
6092 gimple_cond cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6093 NULL_TREE, NULL_TREE);
6094 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6095 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6096 expand_omp_regimplify_p, NULL, NULL)
6097 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6098 expand_omp_regimplify_p, NULL, NULL))
6100 gsi = gsi_for_stmt (cond_stmt);
6101 gimple_regimplify_operands (cond_stmt, &gsi);
6103 ep = split_block (entry_bb, cond_stmt);
6104 ep->flags = EDGE_TRUE_VALUE;
6105 entry_bb = ep->dest;
6106 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6107 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
6108 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
6109 if (gimple_in_ssa_p (cfun))
6111 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6112 for (gimple_phi_iterator gpi = gsi_start_phis (fin_bb);
6113 !gsi_end_p (gpi); gsi_next (&gpi))
6115 gimple_phi phi = gpi.phi ();
6116 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6117 ep, UNKNOWN_LOCATION);
6120 gsi = gsi_last_bb (entry_bb);
6123 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6124 t = fold_convert (itype, t);
6125 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6126 true, GSI_SAME_STMT);
6128 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6129 t = fold_convert (itype, t);
6130 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6131 true, GSI_SAME_STMT);
6133 n1 = fd->loop.n1;
6134 n2 = fd->loop.n2;
6135 step = fd->loop.step;
6136 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6138 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6139 OMP_CLAUSE__LOOPTEMP_);
6140 gcc_assert (innerc);
6141 n1 = OMP_CLAUSE_DECL (innerc);
6142 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6143 OMP_CLAUSE__LOOPTEMP_);
6144 gcc_assert (innerc);
6145 n2 = OMP_CLAUSE_DECL (innerc);
6147 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6148 true, NULL_TREE, true, GSI_SAME_STMT);
6149 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6150 true, NULL_TREE, true, GSI_SAME_STMT);
6151 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6152 true, NULL_TREE, true, GSI_SAME_STMT);
6154 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6155 t = fold_build2 (PLUS_EXPR, itype, step, t);
6156 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6157 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6158 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6159 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6160 fold_build1 (NEGATE_EXPR, itype, t),
6161 fold_build1 (NEGATE_EXPR, itype, step));
6162 else
6163 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6164 t = fold_convert (itype, t);
6165 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
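/* Illustrative check of the count computed above: for
   for (V = 0; V < 10; V += 3) we get adj = STEP - 1 = 2 and
   n = (2 + 10 - 0) / 3 = 4, matching the iterations V = 0, 3, 6, 9.  */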
6167 q = create_tmp_reg (itype, "q");
6168 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
6169 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6170 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
6172 tt = create_tmp_reg (itype, "tt");
6173 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
6174 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6175 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
6177 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
6178 gimple_cond cond_stmt = gimple_build_cond_empty (t);
6179 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6181 second_bb = split_block (entry_bb, cond_stmt)->dest;
6182 gsi = gsi_last_bb (second_bb);
6183 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6185 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
6186 GSI_SAME_STMT);
6187 gimple_assign assign_stmt =
6188 gimple_build_assign_with_ops (PLUS_EXPR, q, q,
6189 build_int_cst (itype, 1));
6190 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6192 third_bb = split_block (second_bb, assign_stmt)->dest;
6193 gsi = gsi_last_bb (third_bb);
6194 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6196 t = build2 (MULT_EXPR, itype, q, threadid);
6197 t = build2 (PLUS_EXPR, itype, t, tt);
6198 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6200 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6201 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6203 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6204 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6206 /* Remove the GIMPLE_OMP_FOR statement. */
6207 gsi_remove (&gsi, true);
6209 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6210 gsi = gsi_start_bb (seq_start_bb);
6212 tree startvar = fd->loop.v;
6213 tree endvar = NULL_TREE;
6215 if (gimple_omp_for_combined_p (fd->for_stmt))
6217 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6218 ? gimple_omp_parallel_clauses (inner_stmt)
6219 : gimple_omp_for_clauses (inner_stmt);
6220 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6221 gcc_assert (innerc);
6222 startvar = OMP_CLAUSE_DECL (innerc);
6223 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6224 OMP_CLAUSE__LOOPTEMP_);
6225 gcc_assert (innerc);
6226 endvar = OMP_CLAUSE_DECL (innerc);
6228 t = fold_convert (itype, s0);
6229 t = fold_build2 (MULT_EXPR, itype, t, step);
6230 if (POINTER_TYPE_P (type))
6231 t = fold_build_pointer_plus (n1, t);
6232 else
6233 t = fold_build2 (PLUS_EXPR, type, t, n1);
6234 t = fold_convert (TREE_TYPE (startvar), t);
6235 t = force_gimple_operand_gsi (&gsi, t,
6236 DECL_P (startvar)
6237 && TREE_ADDRESSABLE (startvar),
6238 NULL_TREE, false, GSI_CONTINUE_LINKING);
6239 assign_stmt = gimple_build_assign (startvar, t);
6240 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6242 t = fold_convert (itype, e0);
6243 t = fold_build2 (MULT_EXPR, itype, t, step);
6244 if (POINTER_TYPE_P (type))
6245 t = fold_build_pointer_plus (n1, t);
6246 else
6247 t = fold_build2 (PLUS_EXPR, type, t, n1);
6248 t = fold_convert (TREE_TYPE (startvar), t);
6249 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6250 false, GSI_CONTINUE_LINKING);
6251 if (endvar)
6253 assign_stmt = gimple_build_assign (endvar, e);
6254 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6255 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6256 assign_stmt = gimple_build_assign (fd->loop.v, e);
6257 else
6258 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6259 NULL_TREE);
6260 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6262 if (fd->collapse > 1)
6263 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6265 if (!broken_loop)
6267 /* The code controlling the sequential loop replaces the
6268 GIMPLE_OMP_CONTINUE. */
6269 gsi = gsi_last_bb (cont_bb);
6270 gimple_omp_continue cont_stmt =
6271 as_a <gimple_omp_continue> (gsi_stmt (gsi));
6272 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
6273 vmain = gimple_omp_continue_control_use (cont_stmt);
6274 vback = gimple_omp_continue_control_def (cont_stmt);
6276 if (!gimple_omp_for_combined_p (fd->for_stmt))
6278 if (POINTER_TYPE_P (type))
6279 t = fold_build_pointer_plus (vmain, step);
6280 else
6281 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6282 t = force_gimple_operand_gsi (&gsi, t,
6283 DECL_P (vback)
6284 && TREE_ADDRESSABLE (vback),
6285 NULL_TREE, true, GSI_SAME_STMT);
6286 assign_stmt = gimple_build_assign (vback, t);
6287 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6289 t = build2 (fd->loop.cond_code, boolean_type_node,
6290 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6291 ? t : vback, e);
6292 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6295 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6296 gsi_remove (&gsi, true);
6298 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6299 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6302 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6303 gsi = gsi_last_bb (exit_bb);
6304 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6306 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6307 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6309 gsi_remove (&gsi, true);
6311 /* Connect all the blocks. */
6312 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6313 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6314 ep = find_edge (entry_bb, second_bb);
6315 ep->flags = EDGE_TRUE_VALUE;
6316 ep->probability = REG_BR_PROB_BASE / 4;
6317 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6318 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6320 if (!broken_loop)
6322 ep = find_edge (cont_bb, body_bb);
6323 if (gimple_omp_for_combined_p (fd->for_stmt))
6325 remove_edge (ep);
6326 ep = NULL;
6328 else if (fd->collapse > 1)
6330 remove_edge (ep);
6331 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6333 else
6334 ep->flags = EDGE_TRUE_VALUE;
6335 find_edge (cont_bb, fin_bb)->flags
6336 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6339 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6340 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6341 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6343 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6344 recompute_dominator (CDI_DOMINATORS, body_bb));
6345 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6346 recompute_dominator (CDI_DOMINATORS, fin_bb));
6348 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6350 struct loop *loop = alloc_loop ();
6351 loop->header = body_bb;
6352 if (collapse_bb == NULL)
6353 loop->latch = cont_bb;
6354 add_loop (loop, body_bb->loop_father);
6359 /* A subroutine of expand_omp_for. Generate code for a parallel
6360 loop with static schedule and a specified chunk size. Given
6361 parameters:
6363 for (V = N1; V cond N2; V += STEP) BODY;
6365 where COND is "<" or ">", we generate pseudocode
6367 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6368 if (cond is <)
6369 adj = STEP - 1;
6370 else
6371 adj = STEP + 1;
6372 if ((__typeof (V)) -1 > 0 && cond is >)
6373 n = -(adj + N2 - N1) / -STEP;
6374 else
6375 n = (adj + N2 - N1) / STEP;
6376 trip = 0;
6377 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6378 here so that V is defined
6379 if the loop is not entered
6380 L0:
6381 s0 = (trip * nthreads + threadid) * CHUNK;
6382 e0 = min(s0 + CHUNK, n);
6383 if (s0 < n) goto L1; else goto L4;
6384 L1:
6385 V = s0 * STEP + N1;
6386 e = e0 * STEP + N1;
6387 L2:
6388 BODY;
6389 V += STEP;
6390 if (V cond e) goto L2; else goto L3;
6391 L3:
6392 trip += 1;
6393 goto L0;
6394 L4:
6395 */
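/* A worked example of the schedule above (illustrative numbers): with
   nthreads = 2, CHUNK = 3 and n = 10, s0 = (trip * 2 + threadid) * 3,
   so thread 0 runs [0,3) then [6,9) and thread 1 runs [3,6) then
   [9,10) (e0 being clamped by min (s0 + CHUNK, n)); on the next trip
   s0 >= n holds for both threads and the loop exits.  */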
6397 static void
6398 expand_omp_for_static_chunk (struct omp_region *region,
6399 struct omp_for_data *fd, gimple inner_stmt)
6401 tree n, s0, e0, e, t;
6402 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6403 tree type, itype, vmain, vback, vextra;
6404 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6405 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6406 gimple_stmt_iterator gsi;
6407 edge se;
6408 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6409 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6410 bool broken_loop = region->cont == NULL;
6411 tree *counts = NULL;
6412 tree n1, n2, step;
6414 itype = type = TREE_TYPE (fd->loop.v);
6415 if (POINTER_TYPE_P (type))
6416 itype = signed_type_for (type);
6418 entry_bb = region->entry;
6419 se = split_block (entry_bb, last_stmt (entry_bb));
6420 entry_bb = se->src;
6421 iter_part_bb = se->dest;
6422 cont_bb = region->cont;
6423 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6424 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6425 gcc_assert (broken_loop
6426 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6427 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6428 body_bb = single_succ (seq_start_bb);
6429 if (!broken_loop)
6431 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6432 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6433 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6435 exit_bb = region->exit;
6437 /* Trip and adjustment setup goes in ENTRY_BB. */
6438 gsi = gsi_last_bb (entry_bb);
6439 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6441 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6443 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6444 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6447 if (fd->collapse > 1)
6449 int first_zero_iter = -1;
6450 basic_block l2_dom_bb = NULL;
6452 counts = XALLOCAVEC (tree, fd->collapse);
6453 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6454 fin_bb, first_zero_iter,
6455 l2_dom_bb);
6456 t = NULL_TREE;
6458 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6459 t = integer_one_node;
6460 else
6461 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6462 fold_convert (type, fd->loop.n1),
6463 fold_convert (type, fd->loop.n2));
6464 if (fd->collapse == 1
6465 && TYPE_UNSIGNED (type)
6466 && (t == NULL_TREE || !integer_onep (t)))
6468 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6469 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6470 true, GSI_SAME_STMT);
6471 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6472 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6473 true, GSI_SAME_STMT);
6474 gimple_cond cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6475 NULL_TREE, NULL_TREE);
6476 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6477 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6478 expand_omp_regimplify_p, NULL, NULL)
6479 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6480 expand_omp_regimplify_p, NULL, NULL))
6482 gsi = gsi_for_stmt (cond_stmt);
6483 gimple_regimplify_operands (cond_stmt, &gsi);
6485 se = split_block (entry_bb, cond_stmt);
6486 se->flags = EDGE_TRUE_VALUE;
6487 entry_bb = se->dest;
6488 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6489 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6490 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6491 if (gimple_in_ssa_p (cfun))
6493 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6494 for (gimple_phi_iterator gpi = gsi_start_phis (fin_bb);
6495 !gsi_end_p (gpi); gsi_next (&gpi))
6497 gimple_phi phi = gpi.phi ();
6498 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6499 se, UNKNOWN_LOCATION);
6502 gsi = gsi_last_bb (entry_bb);
6505 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6506 t = fold_convert (itype, t);
6507 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6508 true, GSI_SAME_STMT);
6510 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6511 t = fold_convert (itype, t);
6512 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6513 true, GSI_SAME_STMT);
6515 n1 = fd->loop.n1;
6516 n2 = fd->loop.n2;
6517 step = fd->loop.step;
6518 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6520 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6521 OMP_CLAUSE__LOOPTEMP_);
6522 gcc_assert (innerc);
6523 n1 = OMP_CLAUSE_DECL (innerc);
6524 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6525 OMP_CLAUSE__LOOPTEMP_);
6526 gcc_assert (innerc);
6527 n2 = OMP_CLAUSE_DECL (innerc);
6529 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6530 true, NULL_TREE, true, GSI_SAME_STMT);
6531 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6532 true, NULL_TREE, true, GSI_SAME_STMT);
6533 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6534 true, NULL_TREE, true, GSI_SAME_STMT);
6535 fd->chunk_size
6536 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
6537 true, NULL_TREE, true, GSI_SAME_STMT);
6539 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6540 t = fold_build2 (PLUS_EXPR, itype, step, t);
6541 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6542 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6543 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6544 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6545 fold_build1 (NEGATE_EXPR, itype, t),
6546 fold_build1 (NEGATE_EXPR, itype, step));
6547 else
6548 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6549 t = fold_convert (itype, t);
6550 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6551 true, GSI_SAME_STMT);
6553 trip_var = create_tmp_reg (itype, ".trip");
6554 if (gimple_in_ssa_p (cfun))
6556 trip_init = make_ssa_name (trip_var, NULL);
6557 trip_main = make_ssa_name (trip_var, NULL);
6558 trip_back = make_ssa_name (trip_var, NULL);
6560 else
6562 trip_init = trip_var;
6563 trip_main = trip_var;
6564 trip_back = trip_var;
6567 gimple_assign assign_stmt =
6568 gimple_build_assign (trip_init, build_int_cst (itype, 0));
6569 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6571 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6572 t = fold_build2 (MULT_EXPR, itype, t, step);
6573 if (POINTER_TYPE_P (type))
6574 t = fold_build_pointer_plus (n1, t);
6575 else
6576 t = fold_build2 (PLUS_EXPR, type, t, n1);
6577 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6578 true, GSI_SAME_STMT);
6580 /* Remove the GIMPLE_OMP_FOR. */
6581 gsi_remove (&gsi, true);
6583 /* Iteration space partitioning goes in ITER_PART_BB. */
6584 gsi = gsi_last_bb (iter_part_bb);
6586 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6587 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6588 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6589 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6590 false, GSI_CONTINUE_LINKING);
6592 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6593 t = fold_build2 (MIN_EXPR, itype, t, n);
6594 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6595 false, GSI_CONTINUE_LINKING);
6597 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6598 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6600 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6601 gsi = gsi_start_bb (seq_start_bb);
6603 tree startvar = fd->loop.v;
6604 tree endvar = NULL_TREE;
6606 if (gimple_omp_for_combined_p (fd->for_stmt))
6608 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6609 ? gimple_omp_parallel_clauses (inner_stmt)
6610 : gimple_omp_for_clauses (inner_stmt);
6611 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6612 gcc_assert (innerc);
6613 startvar = OMP_CLAUSE_DECL (innerc);
6614 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6615 OMP_CLAUSE__LOOPTEMP_);
6616 gcc_assert (innerc);
6617 endvar = OMP_CLAUSE_DECL (innerc);
6620 t = fold_convert (itype, s0);
6621 t = fold_build2 (MULT_EXPR, itype, t, step);
6622 if (POINTER_TYPE_P (type))
6623 t = fold_build_pointer_plus (n1, t);
6624 else
6625 t = fold_build2 (PLUS_EXPR, type, t, n1);
6626 t = fold_convert (TREE_TYPE (startvar), t);
6627 t = force_gimple_operand_gsi (&gsi, t,
6628 DECL_P (startvar)
6629 && TREE_ADDRESSABLE (startvar),
6630 NULL_TREE, false, GSI_CONTINUE_LINKING);
6631 assign_stmt = gimple_build_assign (startvar, t);
6632 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6634 t = fold_convert (itype, e0);
6635 t = fold_build2 (MULT_EXPR, itype, t, step);
6636 if (POINTER_TYPE_P (type))
6637 t = fold_build_pointer_plus (n1, t);
6638 else
6639 t = fold_build2 (PLUS_EXPR, type, t, n1);
6640 t = fold_convert (TREE_TYPE (startvar), t);
6641 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6642 false, GSI_CONTINUE_LINKING);
6643 if (endvar)
6645 assign_stmt = gimple_build_assign (endvar, e);
6646 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6647 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6648 assign_stmt = gimple_build_assign (fd->loop.v, e);
6649 else
6650 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6651 NULL_TREE);
6652 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6654 if (fd->collapse > 1)
6655 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6657 if (!broken_loop)
6659 /* The code controlling the sequential loop goes in CONT_BB,
6660 replacing the GIMPLE_OMP_CONTINUE. */
6661 gsi = gsi_last_bb (cont_bb);
6662 gimple_omp_continue cont_stmt =
6663 as_a <gimple_omp_continue> (gsi_stmt (gsi));
6664 vmain = gimple_omp_continue_control_use (cont_stmt);
6665 vback = gimple_omp_continue_control_def (cont_stmt);
6667 if (!gimple_omp_for_combined_p (fd->for_stmt))
6669 if (POINTER_TYPE_P (type))
6670 t = fold_build_pointer_plus (vmain, step);
6671 else
6672 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6673 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
6674 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6675 true, GSI_SAME_STMT);
6676 assign_stmt = gimple_build_assign (vback, t);
6677 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6679 t = build2 (fd->loop.cond_code, boolean_type_node,
6680 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6681 ? t : vback, e);
6682 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6685 /* Remove GIMPLE_OMP_CONTINUE. */
6686 gsi_remove (&gsi, true);
6688 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6689 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6691 /* Trip update code goes into TRIP_UPDATE_BB. */
6692 gsi = gsi_start_bb (trip_update_bb);
6694 t = build_int_cst (itype, 1);
6695 t = build2 (PLUS_EXPR, itype, trip_main, t);
6696 assign_stmt = gimple_build_assign (trip_back, t);
6697 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6700 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6701 gsi = gsi_last_bb (exit_bb);
6702 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6704 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6705 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6707 gsi_remove (&gsi, true);
6709 /* Connect the new blocks. */
6710 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6711 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6713 if (!broken_loop)
6715 se = find_edge (cont_bb, body_bb);
6716 if (gimple_omp_for_combined_p (fd->for_stmt))
6718 remove_edge (se);
6719 se = NULL;
6721 else if (fd->collapse > 1)
6723 remove_edge (se);
6724 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6726 else
6727 se->flags = EDGE_TRUE_VALUE;
6728 find_edge (cont_bb, trip_update_bb)->flags
6729 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6731 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6734 if (gimple_in_ssa_p (cfun))
6736 gimple_phi_iterator psi;
6737 gimple_phi phi;
6738 edge re, ene;
6739 edge_var_map *vm;
6740 size_t i;
6742 gcc_assert (fd->collapse == 1 && !broken_loop);
6744 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6745 remove arguments of the phi nodes in fin_bb. We need to create
6746 appropriate phi nodes in iter_part_bb instead. */
6747 se = single_pred_edge (fin_bb);
6748 re = single_succ_edge (trip_update_bb);
6749 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
6750 ene = single_succ_edge (entry_bb);
6752 psi = gsi_start_phis (fin_bb);
6753 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6754 gsi_next (&psi), ++i)
6756 gimple_phi nphi;
6757 source_location locus;
6759 phi = psi.phi ();
6760 t = gimple_phi_result (phi);
6761 gcc_assert (t == redirect_edge_var_map_result (vm));
6762 nphi = create_phi_node (t, iter_part_bb);
6764 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6765 locus = gimple_phi_arg_location_from_edge (phi, se);
6767 /* A special case -- fd->loop.v is not yet computed in
6768 iter_part_bb, so we need to use vextra instead. */
6769 if (t == fd->loop.v)
6770 t = vextra;
6771 add_phi_arg (nphi, t, ene, locus);
6772 locus = redirect_edge_var_map_location (vm);
6773 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6775 gcc_assert (!gsi_end_p (psi) && i == head->length ());
6776 redirect_edge_var_map_clear (re);
6777 while (1)
6779 psi = gsi_start_phis (fin_bb);
6780 if (gsi_end_p (psi))
6781 break;
6782 remove_phi_node (&psi, false);
6785 /* Make phi node for trip. */
6786 phi = create_phi_node (trip_main, iter_part_bb);
6787 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6788 UNKNOWN_LOCATION);
6789 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6790 UNKNOWN_LOCATION);
6793 if (!broken_loop)
6794 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6795 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6796 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6797 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6798 recompute_dominator (CDI_DOMINATORS, fin_bb));
6799 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6800 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6801 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6802 recompute_dominator (CDI_DOMINATORS, body_bb));
6804 if (!broken_loop)
6806 struct loop *trip_loop = alloc_loop ();
6807 trip_loop->header = iter_part_bb;
6808 trip_loop->latch = trip_update_bb;
6809 add_loop (trip_loop, iter_part_bb->loop_father);
6811 if (!gimple_omp_for_combined_p (fd->for_stmt))
6813 struct loop *loop = alloc_loop ();
6814 loop->header = body_bb;
6815 if (collapse_bb == NULL)
6816 loop->latch = cont_bb;
6817 add_loop (loop, trip_loop);
6822 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
6823 Given parameters:
6824 for (V = N1; V cond N2; V += STEP) BODY;
6826 where COND is "<" or ">" or "!=", we generate pseudocode
6828 for (ind_var = low; ind_var < high; ind_var++)
6829 {
6830 V = n1 + (ind_var * STEP)
6832 <BODY>
6833 }
6835 In the above pseudocode, low and high are function parameters of the
6836 child function. In the function below, we insert a temporary
6837 variable and placeholder calls to two OMP functions that cannot
6838 otherwise appear in the body of a _Cilk_for (since OMP_FOR cannot be
6839 mixed with _Cilk_for). These calls are later replaced with low and
6840 high by the function that handles taskreg. */
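/* Illustrative note (an assumption about the Cilk runtime, not stated
   in this file): the __libcilkrts_cilk_for_* entry point is expected
   to split the [0, count) range recursively and invoke the child
   function with successive [__low, __high) sub-ranges, which is why
   only these two boundary parameters need to be patched in here.  */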
6843 static void
6844 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
6846 bool broken_loop = region->cont == NULL;
6847 basic_block entry_bb = region->entry;
6848 basic_block cont_bb = region->cont;
6850 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6851 gcc_assert (broken_loop
6852 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6853 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6854 basic_block l1_bb, l2_bb;
6856 if (!broken_loop)
6858 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6859 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6860 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6861 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6863 else
6865 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6866 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6867 l2_bb = single_succ (l1_bb);
6869 basic_block exit_bb = region->exit;
6870 basic_block l2_dom_bb = NULL;
6872 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
6874 /* The statements below, up to the "tree high_val = ..." line, are
6875 pseudo statements that pass information to expand_omp_taskreg.
6876 low_val and high_val will be replaced by the __low and __high
6877 parameters of the child function.
6879 The call_exprs part is a place-holder; it mainly marks, for the
6880 top-level part, the spot where low and high should be put
6881 (reasoning given in the header comment). */
6884 tree child_fndecl
6885 = gimple_omp_parallel_child_fn (
6886 as_a <gimple_omp_parallel> (last_stmt (region->outer->entry)));
6887 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
6888 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
6890 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
6891 high_val = t;
6892 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
6893 low_val = t;
6895 gcc_assert (low_val && high_val);
6897 tree type = TREE_TYPE (low_val);
6898 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
6899 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6901 /* Not needed in SSA form right now. */
6902 gcc_assert (!gimple_in_ssa_p (cfun));
6903 if (l2_dom_bb == NULL)
6904 l2_dom_bb = l1_bb;
6906 tree n1 = low_val;
6907 tree n2 = high_val;
6909 gimple stmt = gimple_build_assign (ind_var, n1);
6911 /* Replace the GIMPLE_OMP_FOR statement. */
6912 gsi_replace (&gsi, stmt, true);
6914 if (!broken_loop)
6916 /* Code to control the increment goes in the CONT_BB. */
6917 gsi = gsi_last_bb (cont_bb);
6918 stmt = gsi_stmt (gsi);
6919 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6920 stmt = gimple_build_assign_with_ops (PLUS_EXPR, ind_var, ind_var,
6921 build_one_cst (type));
6923 /* Replace GIMPLE_OMP_CONTINUE. */
6924 gsi_replace (&gsi, stmt, true);
6927 /* Emit the condition in L1_BB. */
6928 gsi = gsi_after_labels (l1_bb);
6929 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
6930 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
6931 fd->loop.step);
6932 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
6933 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6934 fd->loop.n1, fold_convert (sizetype, t));
6935 else
6936 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6937 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
6938 t = fold_convert (TREE_TYPE (fd->loop.v), t);
6939 expand_omp_build_assign (&gsi, fd->loop.v, t);
6941 /* The condition is always '<' since the runtime will fill in the low
6942 and high values. */
6943 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
6944 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6946 /* Remove GIMPLE_OMP_RETURN. */
6947 gsi = gsi_last_bb (exit_bb);
6948 gsi_remove (&gsi, true);
6950 /* Connect the new blocks. */
6951 remove_edge (FALLTHRU_EDGE (entry_bb));
6953 edge e, ne;
6954 if (!broken_loop)
6956 remove_edge (BRANCH_EDGE (entry_bb));
6957 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6959 e = BRANCH_EDGE (l1_bb);
6960 ne = FALLTHRU_EDGE (l1_bb);
6961 e->flags = EDGE_TRUE_VALUE;
6963 else
6965 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6967 ne = single_succ_edge (l1_bb);
6968 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6971 ne->flags = EDGE_FALSE_VALUE;
6972 e->probability = REG_BR_PROB_BASE * 7 / 8;
6973 ne->probability = REG_BR_PROB_BASE / 8;
6975 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6976 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6977 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6979 if (!broken_loop)
6981 struct loop *loop = alloc_loop ();
6982 loop->header = l1_bb;
6983 loop->latch = cont_bb;
6984 add_loop (loop, l1_bb->loop_father);
6985 loop->safelen = INT_MAX;
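/* Note (an inference, not stated here): _Cilk_for iterations are
   required by the language to be independent, so safelen is set to
   INT_MAX to tell the vectorizer there is no cross-iteration
   dependence distance to respect.  */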
6988 /* Pick the correct library function based on the precision of the
6989 induction variable type. */
6990 tree lib_fun = NULL_TREE;
6991 if (TYPE_PRECISION (type) == 32)
6992 lib_fun = cilk_for_32_fndecl;
6993 else if (TYPE_PRECISION (type) == 64)
6994 lib_fun = cilk_for_64_fndecl;
6995 else
6996 gcc_unreachable ();
6998 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
7000 /* WS_ARGS contains the library function flavor to call
7001 (__libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32) and the
7002 user-defined grain value. If the user does not define one, then zero
7003 is passed in by the parser. */
7004 vec_alloc (region->ws_args, 2);
7005 region->ws_args->quick_push (lib_fun);
7006 region->ws_args->quick_push (fd->chunk_size);
7009 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
7010 loop. Given parameters:
7012 for (V = N1; V cond N2; V += STEP) BODY;
7014 where COND is "<" or ">", we generate pseudocode
7016 V = N1;
7017 goto L1;
7018 L0:
7019 BODY;
7020 V += STEP;
7021 L1:
7022 if (V cond N2) goto L0; else goto L2;
7023 L2:
7025 For collapsed loops, given parameters:
7026 collapse(3)
7027 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
7028 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
7029 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
7030 BODY;
7032 we generate pseudocode
7034 if (cond3 is <)
7035 adj = STEP3 - 1;
7036 else
7037 adj = STEP3 + 1;
7038 count3 = (adj + N32 - N31) / STEP3;
7039 if (cond2 is <)
7040 adj = STEP2 - 1;
7041 else
7042 adj = STEP2 + 1;
7043 count2 = (adj + N22 - N21) / STEP2;
7044 if (cond1 is <)
7045 adj = STEP1 - 1;
7046 else
7047 adj = STEP1 + 1;
7048 count1 = (adj + N12 - N11) / STEP1;
7049 count = count1 * count2 * count3;
7050 V = 0;
7051 V1 = N11;
7052 V2 = N21;
7053 V3 = N31;
7054 goto L1;
7055 L0:
7056 BODY;
7057 V += 1;
7058 V3 += STEP3;
7059 V2 += (V3 cond3 N32) ? 0 : STEP2;
7060 V3 = (V3 cond3 N32) ? V3 : N31;
7061 V1 += (V2 cond2 N22) ? 0 : STEP1;
7062 V2 = (V2 cond2 N22) ? V2 : N21;
7063 L1:
7064 if (V < count) goto L0; else goto L2;
7065 L2:
7066 */
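/* A small worked instance of the collapsed form above (illustrative):
   for collapse(2) with count1 = 2 and count2 = 3, count = 6 and the
   linear counter V = 0..5 drives (V1, V2) odometer-style through
   (0,0) (0,1) (0,2) (1,0) (1,1) (1,2), the inner variable being reset
   to its N21 start each time its condition fails.  */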
7069 static void
7070 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
7072 tree type, t;
7073 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7074 gimple_stmt_iterator gsi;
7075 gimple stmt;
7076 bool broken_loop = region->cont == NULL;
7077 edge e, ne;
7078 tree *counts = NULL;
7079 int i;
7080 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7081 OMP_CLAUSE_SAFELEN);
7082 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7083 OMP_CLAUSE__SIMDUID_);
7084 tree n1, n2;
7086 type = TREE_TYPE (fd->loop.v);
7087 entry_bb = region->entry;
7088 cont_bb = region->cont;
7089 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7090 gcc_assert (broken_loop
7091 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7092 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7093 if (!broken_loop)
7095 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7096 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7097 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7098 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7100 else
7102 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7103 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7104 l2_bb = single_succ (l1_bb);
7106 exit_bb = region->exit;
7107 l2_dom_bb = NULL;
7109 gsi = gsi_last_bb (entry_bb);
7111 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7112 /* Not needed in SSA form right now. */
7113 gcc_assert (!gimple_in_ssa_p (cfun));
7114 if (fd->collapse > 1)
7116 int first_zero_iter = -1;
7117 basic_block zero_iter_bb = l2_bb;
7119 counts = XALLOCAVEC (tree, fd->collapse);
7120 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7121 zero_iter_bb, first_zero_iter,
7122 l2_dom_bb);
7124 if (l2_dom_bb == NULL)
7125 l2_dom_bb = l1_bb;
7127 n1 = fd->loop.n1;
7128 n2 = fd->loop.n2;
7129 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7131 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7132 OMP_CLAUSE__LOOPTEMP_);
7133 gcc_assert (innerc);
7134 n1 = OMP_CLAUSE_DECL (innerc);
7135 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7136 OMP_CLAUSE__LOOPTEMP_);
7137 gcc_assert (innerc);
7138 n2 = OMP_CLAUSE_DECL (innerc);
7139 expand_omp_build_assign (&gsi, fd->loop.v,
7140 fold_convert (type, n1));
7141 if (fd->collapse > 1)
7143 gsi_prev (&gsi);
7144 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7145 gsi_next (&gsi);
7148 else
7150 expand_omp_build_assign (&gsi, fd->loop.v,
7151 fold_convert (type, fd->loop.n1));
7152 if (fd->collapse > 1)
7153 for (i = 0; i < fd->collapse; i++)
7155 tree itype = TREE_TYPE (fd->loops[i].v);
7156 if (POINTER_TYPE_P (itype))
7157 itype = signed_type_for (itype);
7158 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7159 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7163 /* Remove the GIMPLE_OMP_FOR statement. */
7164 gsi_remove (&gsi, true);
7166 if (!broken_loop)
7168 /* Code to control the increment goes in the CONT_BB. */
7169 gsi = gsi_last_bb (cont_bb);
7170 stmt = gsi_stmt (gsi);
7171 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7173 if (POINTER_TYPE_P (type))
7174 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7175 else
7176 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7177 expand_omp_build_assign (&gsi, fd->loop.v, t);
7179 if (fd->collapse > 1)
7181 i = fd->collapse - 1;
7182 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7184 t = fold_convert (sizetype, fd->loops[i].step);
7185 t = fold_build_pointer_plus (fd->loops[i].v, t);
7187 else
7189 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7190 fd->loops[i].step);
7191 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7192 fd->loops[i].v, t);
7194 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7196 for (i = fd->collapse - 1; i > 0; i--)
7198 tree itype = TREE_TYPE (fd->loops[i].v);
7199 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7200 if (POINTER_TYPE_P (itype2))
7201 itype2 = signed_type_for (itype2);
7202 t = build3 (COND_EXPR, itype2,
7203 build2 (fd->loops[i].cond_code, boolean_type_node,
7204 fd->loops[i].v,
7205 fold_convert (itype, fd->loops[i].n2)),
7206 build_int_cst (itype2, 0),
7207 fold_convert (itype2, fd->loops[i - 1].step));
7208 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7209 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7210 else
7211 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7212 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7214 t = build3 (COND_EXPR, itype,
7215 build2 (fd->loops[i].cond_code, boolean_type_node,
7216 fd->loops[i].v,
7217 fold_convert (itype, fd->loops[i].n2)),
7218 fd->loops[i].v,
7219 fold_convert (itype, fd->loops[i].n1));
7220 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7224 /* Remove GIMPLE_OMP_CONTINUE. */
7225 gsi_remove (&gsi, true);
7228 /* Emit the condition in L1_BB. */
7229 gsi = gsi_start_bb (l1_bb);
7231 t = fold_convert (type, n2);
7232 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7233 false, GSI_CONTINUE_LINKING);
7234 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7235 stmt = gimple_build_cond_empty (t);
7236 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7237 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
7238 NULL, NULL)
7239 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
7240 NULL, NULL))
7242 gsi = gsi_for_stmt (stmt);
7243 gimple_regimplify_operands (stmt, &gsi);
7246 /* Remove GIMPLE_OMP_RETURN. */
7247 gsi = gsi_last_bb (exit_bb);
7248 gsi_remove (&gsi, true);
7250 /* Connect the new blocks. */
7251 remove_edge (FALLTHRU_EDGE (entry_bb));
7253 if (!broken_loop)
7255 remove_edge (BRANCH_EDGE (entry_bb));
7256 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7258 e = BRANCH_EDGE (l1_bb);
7259 ne = FALLTHRU_EDGE (l1_bb);
7260 e->flags = EDGE_TRUE_VALUE;
7262 else
7264 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7266 ne = single_succ_edge (l1_bb);
7267 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7270 ne->flags = EDGE_FALSE_VALUE;
7271 e->probability = REG_BR_PROB_BASE * 7 / 8;
7272 ne->probability = REG_BR_PROB_BASE / 8;
7274 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7275 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7276 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7278 if (!broken_loop)
7280 struct loop *loop = alloc_loop ();
7281 loop->header = l1_bb;
7282 loop->latch = cont_bb;
7283 add_loop (loop, l1_bb->loop_father);
7284 if (safelen == NULL_TREE)
7285 loop->safelen = INT_MAX;
7286 else
7288 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7289 if (TREE_CODE (safelen) != INTEGER_CST)
7290 loop->safelen = 0;
7291 else if (!tree_fits_uhwi_p (safelen)
7292 || tree_to_uhwi (safelen) > INT_MAX)
7293 loop->safelen = INT_MAX;
7294 else
7295 loop->safelen = tree_to_uhwi (safelen);
7296 if (loop->safelen == 1)
7297 loop->safelen = 0;
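/* Examples of the mapping above (illustrative): safelen(4) yields
   loop->safelen == 4; a non-constant expression degrades to 0 (no
   usable bound); a constant exceeding INT_MAX saturates to INT_MAX;
   and safelen(1) is canonicalized to 0, since a single independent
   iteration offers nothing to vectorize.  */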
7299 if (simduid)
7301 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7302 cfun->has_simduid_loops = true;
7304 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7305 the loop. */
7306 if ((flag_tree_loop_vectorize
7307 || (!global_options_set.x_flag_tree_loop_vectorize
7308 && !global_options_set.x_flag_tree_vectorize))
7309 && flag_tree_loop_optimize
7310 && loop->safelen > 1)
7312 loop->force_vectorize = true;
7313 cfun->has_force_vectorize_loops = true;
7319 /* Expand the OpenMP loop defined by REGION. */
7321 static void
7322 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7324 struct omp_for_data fd;
7325 struct omp_for_data_loop *loops;
7327 loops
7328 = (struct omp_for_data_loop *)
7329 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7330 * sizeof (struct omp_for_data_loop));
7331 extract_omp_for_data (as_a <gimple_omp_for> (last_stmt (region->entry)),
7332 &fd, loops);
7333 region->sched_kind = fd.sched_kind;
7335 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7336 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7337 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7338 if (region->cont)
7340 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7341 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7342 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7344 else
7345 /* If there isn't a continue then this is a degenerate case where
7346 the introduction of abnormal edges during lowering will prevent
7347 original loops from being detected. Fix that up. */
7348 loops_state_set (LOOPS_NEED_FIXUP);
7350 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7351 expand_omp_simd (region, &fd);
7352 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7353 expand_cilk_for (region, &fd);
7354 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7355 && !fd.have_ordered)
7357 if (fd.chunk_size == NULL)
7358 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7359 else
7360 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7362 else
7364 int fn_index, start_ix, next_ix;
7366 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7367 == GF_OMP_FOR_KIND_FOR);
7368 if (fd.chunk_size == NULL
7369 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7370 fd.chunk_size = integer_zero_node;
7371 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
7372 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7373 ? 3 : fd.sched_kind;
7374 fn_index += fd.have_ordered * 4;
7375 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7376 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
7377 if (fd.iter_type == long_long_unsigned_type_node)
7379 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7380 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7381 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7382 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
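/* Illustrative mapping, assuming the usual consecutive layout of the
   BUILT_IN_GOMP_LOOP_* enumerators: schedule(dynamic) yields
   fn_index == 1 and hence BUILT_IN_GOMP_LOOP_DYNAMIC_START; adding
   "ordered" shifts by 4 into the ORDERED flavors, and an unsigned
   long long iter_type re-bases onto the _ULL_ counterparts via the
   offsets above.  */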
7384 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7385 (enum built_in_function) next_ix, inner_stmt);
7388 if (gimple_in_ssa_p (cfun))
7389 update_ssa (TODO_update_ssa_only_virtuals);
7393 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7395 v = GOMP_sections_start (n);
7396 L0:
7397 switch (v)
7398 {
7399 case 0:
7400 goto L2;
7401 case 1:
7402 section 1;
7403 goto L1;
7404 case 2:
7405 ...
7406 case n:
7407 ...
7408 default:
7409 abort ();
7410 }
7411 L1:
7412 v = GOMP_sections_next ();
7413 goto L0;
7414 L2:
7415 reduction;
7417 If this is a combined parallel sections, replace the call to
7418 GOMP_sections_start with a call to GOMP_sections_next. */
7420 static void
7421 expand_omp_sections (struct omp_region *region)
7423 tree t, u, vin = NULL, vmain, vnext, l2;
7424 unsigned len;
7425 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7426 gimple_stmt_iterator si, switch_si;
7427 gimple_omp_sections sections_stmt;
7428 gimple stmt;
7429 gimple_omp_continue cont;
7430 edge_iterator ei;
7431 edge e;
7432 struct omp_region *inner;
7433 unsigned i, casei;
7434 bool exit_reachable = region->cont != NULL;
7436 gcc_assert (region->exit != NULL);
7437 entry_bb = region->entry;
7438 l0_bb = single_succ (entry_bb);
7439 l1_bb = region->cont;
7440 l2_bb = region->exit;
7441 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7442 l2 = gimple_block_label (l2_bb);
7443 else
7445 /* This can happen if there are reductions. */
7446 len = EDGE_COUNT (l0_bb->succs);
7447 gcc_assert (len > 0);
7448 e = EDGE_SUCC (l0_bb, len - 1);
7449 si = gsi_last_bb (e->dest);
7450 l2 = NULL_TREE;
7451 if (gsi_end_p (si)
7452 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7453 l2 = gimple_block_label (e->dest);
7454 else
7455 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7457 si = gsi_last_bb (e->dest);
7458 if (gsi_end_p (si)
7459 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7461 l2 = gimple_block_label (e->dest);
7462 break;
7466 if (exit_reachable)
7467 default_bb = create_empty_bb (l1_bb->prev_bb);
7468 else
7469 default_bb = create_empty_bb (l0_bb);
7471 /* We will build a switch() with enough cases for all the
7472 GIMPLE_OMP_SECTION regions, a '0' case taken when no work remains,
7473 and a default case to abort if something goes wrong. */
7474 len = EDGE_COUNT (l0_bb->succs);
7476 /* Use vec::quick_push on label_vec throughout, since we know the size
7477 in advance. */
7478 auto_vec<tree> label_vec (len);
7480 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7481 GIMPLE_OMP_SECTIONS statement. */
7482 si = gsi_last_bb (entry_bb);
7483 sections_stmt = as_a <gimple_omp_sections> (gsi_stmt (si));
7484 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7485 vin = gimple_omp_sections_control (sections_stmt);
7486 if (!is_combined_parallel (region))
7488 /* If we are not inside a combined parallel+sections region,
7489 call GOMP_sections_start. */
7490 t = build_int_cst (unsigned_type_node, len - 1);
7491 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7492 stmt = gimple_build_call (u, 1, t);
7494 else
7496 /* Otherwise, call GOMP_sections_next. */
7497 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7498 stmt = gimple_build_call (u, 0);
7500 gimple_call_set_lhs (stmt, vin);
7501 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7502 gsi_remove (&si, true);
7504 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7505 L0_BB. */
7506 switch_si = gsi_last_bb (l0_bb);
7507 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7508 if (exit_reachable)
7510 cont = as_a <gimple_omp_continue> (last_stmt (l1_bb));
7511 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7512 vmain = gimple_omp_continue_control_use (cont);
7513 vnext = gimple_omp_continue_control_def (cont);
7515 else
7517 vmain = vin;
7518 vnext = NULL_TREE;
7521 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7522 label_vec.quick_push (t);
7523 i = 1;
7525 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7526 for (inner = region->inner, casei = 1;
7527 inner;
7528 inner = inner->next, i++, casei++)
7530 basic_block s_entry_bb, s_exit_bb;
7532 /* Skip optional reduction region. */
7533 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7535 --i;
7536 --casei;
7537 continue;
7540 s_entry_bb = inner->entry;
7541 s_exit_bb = inner->exit;
7543 t = gimple_block_label (s_entry_bb);
7544 u = build_int_cst (unsigned_type_node, casei);
7545 u = build_case_label (u, NULL, t);
7546 label_vec.quick_push (u);
7548 si = gsi_last_bb (s_entry_bb);
7549 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7550 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7551 gsi_remove (&si, true);
7552 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7554 if (s_exit_bb == NULL)
7555 continue;
7557 si = gsi_last_bb (s_exit_bb);
7558 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7559 gsi_remove (&si, true);
7561 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7564 /* Error handling code goes in DEFAULT_BB. */
7565 t = gimple_block_label (default_bb);
7566 u = build_case_label (NULL, NULL, t);
7567 make_edge (l0_bb, default_bb, 0);
7568 add_bb_to_loop (default_bb, current_loops->tree_root);
7570 stmt = gimple_build_switch (vmain, u, label_vec);
7571 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7572 gsi_remove (&switch_si, true);
7574 si = gsi_start_bb (default_bb);
7575 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7576 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7578 if (exit_reachable)
7580 tree bfn_decl;
7582 /* Code to get the next section goes in L1_BB. */
7583 si = gsi_last_bb (l1_bb);
7584 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7586 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7587 stmt = gimple_build_call (bfn_decl, 0);
7588 gimple_call_set_lhs (stmt, vnext);
7589 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7590 gsi_remove (&si, true);
7592 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7595 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7596 si = gsi_last_bb (l2_bb);
7597 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7598 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7599 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7600 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7601 else
7602 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7603 stmt = gimple_build_call (t, 0);
7604 if (gimple_omp_return_lhs (gsi_stmt (si)))
7605 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7606 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7607 gsi_remove (&si, true);
7609 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7613 /* Expand code for an OpenMP single directive. We've already expanded
7614 much of the code, here we simply place the GOMP_barrier call. */
7616 static void
7617 expand_omp_single (struct omp_region *region)
7619 basic_block entry_bb, exit_bb;
7620 gimple_stmt_iterator si;
7622 entry_bb = region->entry;
7623 exit_bb = region->exit;
7625 si = gsi_last_bb (entry_bb);
7626 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7627 gsi_remove (&si, true);
7628 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7630 si = gsi_last_bb (exit_bb);
7631 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7633 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7634 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7636 gsi_remove (&si, true);
7637 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7641 /* Generic expansion for OpenMP synchronization directives: master,
7642 ordered and critical. All we need to do here is remove the entry
7643 and exit markers for REGION. */
7645 static void
7646 expand_omp_synch (struct omp_region *region)
7648 basic_block entry_bb, exit_bb;
7649 gimple_stmt_iterator si;
7651 entry_bb = region->entry;
7652 exit_bb = region->exit;
7654 si = gsi_last_bb (entry_bb);
7655 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7656 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7657 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7658 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7659 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7660 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7661 gsi_remove (&si, true);
7662 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7664 if (exit_bb)
7666 si = gsi_last_bb (exit_bb);
7667 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7668 gsi_remove (&si, true);
7669 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7673 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7674 operation as a normal volatile load. */
7676 static bool
7677 expand_omp_atomic_load (basic_block load_bb, tree addr,
7678 tree loaded_val, int index)
7680 enum built_in_function tmpbase;
7681 gimple_stmt_iterator gsi;
7682 basic_block store_bb;
7683 location_t loc;
7684 gimple stmt;
7685 tree decl, call, type, itype;
7687 gsi = gsi_last_bb (load_bb);
7688 stmt = gsi_stmt (gsi);
7689 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7690 loc = gimple_location (stmt);
7692 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7693 is smaller than word size, then expand_atomic_load assumes that the load
7694 is atomic. We could avoid the builtin entirely in this case. */
7696 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7697 decl = builtin_decl_explicit (tmpbase);
7698 if (decl == NULL_TREE)
7699 return false;
7701 type = TREE_TYPE (loaded_val);
7702 itype = TREE_TYPE (TREE_TYPE (decl));
7704 call = build_call_expr_loc (loc, decl, 2, addr,
7705 build_int_cst (NULL,
7706 gimple_omp_atomic_seq_cst_p (stmt)
7707 ? MEMMODEL_SEQ_CST
7708 : MEMMODEL_RELAXED));
7709 if (!useless_type_conversion_p (type, itype))
7710 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7711 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7713 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7714 gsi_remove (&gsi, true);
7716 store_bb = single_succ (load_bb);
7717 gsi = gsi_last_bb (store_bb);
7718 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7719 gsi_remove (&gsi, true);
7721 if (gimple_in_ssa_p (cfun))
7722 update_ssa (TODO_update_ssa_no_phi);
7724 return true;
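/* For illustration (sizes simplified), given a 4-byte V a user-level

	#pragma omp atomic read
	v = *p;

   is expanded by the function above into roughly

	v = __atomic_load_4 (p, MEMMODEL_RELAXED);

   with MEMMODEL_SEQ_CST used instead when the seq_cst clause is
   present.  */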
7727 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7728 operation as a normal volatile store. */
7730 static bool
7731 expand_omp_atomic_store (basic_block load_bb, tree addr,
7732 tree loaded_val, tree stored_val, int index)
7734 enum built_in_function tmpbase;
7735 gimple_stmt_iterator gsi;
7736 basic_block store_bb = single_succ (load_bb);
7737 location_t loc;
7738 gimple stmt;
7739 tree decl, call, type, itype;
7740 enum machine_mode imode;
7741 bool exchange;
7743 gsi = gsi_last_bb (load_bb);
7744 stmt = gsi_stmt (gsi);
7745 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7747 /* If the load value is needed, then this isn't a store but an exchange. */
7748 exchange = gimple_omp_atomic_need_value_p (stmt);
7750 gsi = gsi_last_bb (store_bb);
7751 stmt = gsi_stmt (gsi);
7752 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7753 loc = gimple_location (stmt);
7755 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7756 is smaller than word size, then expand_atomic_store assumes that the store
7757 is atomic. We could avoid the builtin entirely in this case. */
7759 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7760 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7761 decl = builtin_decl_explicit (tmpbase);
7762 if (decl == NULL_TREE)
7763 return false;
7765 type = TREE_TYPE (stored_val);
7767 /* Dig out the type of the function's second argument. */
7768 itype = TREE_TYPE (decl);
7769 itype = TYPE_ARG_TYPES (itype);
7770 itype = TREE_CHAIN (itype);
7771 itype = TREE_VALUE (itype);
7772 imode = TYPE_MODE (itype);
7774 if (exchange && !can_atomic_exchange_p (imode, true))
7775 return false;
7777 if (!useless_type_conversion_p (itype, type))
7778 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7779 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7780 build_int_cst (NULL,
7781 gimple_omp_atomic_seq_cst_p (stmt)
7782 ? MEMMODEL_SEQ_CST
7783 : MEMMODEL_RELAXED));
7784 if (exchange)
7786 if (!useless_type_conversion_p (type, itype))
7787 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7788 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7791 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7792 gsi_remove (&gsi, true);
7794 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7795 gsi = gsi_last_bb (load_bb);
7796 gsi_remove (&gsi, true);
7798 if (gimple_in_ssa_p (cfun))
7799 update_ssa (TODO_update_ssa_no_phi);
7801 return true;
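/* For illustration (sizes simplified), a user-level

	#pragma omp atomic write
	*p = v;

   becomes roughly __atomic_store_4 (p, v, MEMMODEL_RELAXED), and a
   capture of the old value such as

	#pragma omp atomic capture
	{ old = *p; *p = v; }

   becomes old = __atomic_exchange_4 (p, v, MEMMODEL_RELAXED).  */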
7804 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7805 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7806 size of the data type, and thus usable to find the index of the builtin
7807 decl. Returns false if the expression is not of the proper form. */
7809 static bool
7810 expand_omp_atomic_fetch_op (basic_block load_bb,
7811 tree addr, tree loaded_val,
7812 tree stored_val, int index)
7814 enum built_in_function oldbase, newbase, tmpbase;
7815 tree decl, itype, call;
7816 tree lhs, rhs;
7817 basic_block store_bb = single_succ (load_bb);
7818 gimple_stmt_iterator gsi;
7819 gimple stmt;
7820 location_t loc;
7821 enum tree_code code;
7822 bool need_old, need_new;
7823 enum machine_mode imode;
7824 bool seq_cst;
7826 /* We expect to find the following sequences:
7828 load_bb:
7829 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7831 store_bb:
7832 val = tmp OP something; (or: something OP tmp)
7833 GIMPLE_OMP_ATOMIC_STORE (val)
7835 ??? FIXME: Allow a more flexible sequence.
7836 Perhaps use data flow to pick the statements.
7840 gsi = gsi_after_labels (store_bb);
7841 stmt = gsi_stmt (gsi);
7842 loc = gimple_location (stmt);
7843 if (!is_gimple_assign (stmt))
7844 return false;
7845 gsi_next (&gsi);
7846 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7847 return false;
7848 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7849 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7850 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7851 gcc_checking_assert (!need_old || !need_new);
7853 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7854 return false;
7856 /* Check for one of the supported fetch-op operations. */
7857 code = gimple_assign_rhs_code (stmt);
7858 switch (code)
7860 case PLUS_EXPR:
7861 case POINTER_PLUS_EXPR:
7862 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7863 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7864 break;
7865 case MINUS_EXPR:
7866 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7867 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7868 break;
7869 case BIT_AND_EXPR:
7870 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7871 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7872 break;
7873 case BIT_IOR_EXPR:
7874 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7875 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7876 break;
7877 case BIT_XOR_EXPR:
7878 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7879 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7880 break;
7881 default:
7882 return false;
7885 /* Make sure the expression is of the proper form. */
7886 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7887 rhs = gimple_assign_rhs2 (stmt);
7888 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7889 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7890 rhs = gimple_assign_rhs1 (stmt);
7891 else
7892 return false;
7894 tmpbase = ((enum built_in_function)
7895 ((need_new ? newbase : oldbase) + index + 1));
7896 decl = builtin_decl_explicit (tmpbase);
7897 if (decl == NULL_TREE)
7898 return false;
7899 itype = TREE_TYPE (TREE_TYPE (decl));
7900 imode = TYPE_MODE (itype);
7902 /* We could test all of the various optabs involved, but the fact of the
7903 matter is that (with the exception of i486 vs i586 and xadd) all targets
7904 that support any atomic operation optab also implement compare-and-swap.
7905 Let optabs.c take care of expanding any compare-and-swap loop. */
7906 if (!can_compare_and_swap_p (imode, true))
7907 return false;
7909 gsi = gsi_last_bb (load_bb);
7910 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7912 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7913 It only requires that the operation happen atomically. Thus we can
7914 use the RELAXED memory model. */
7915 call = build_call_expr_loc (loc, decl, 3, addr,
7916 fold_convert_loc (loc, itype, rhs),
7917 build_int_cst (NULL,
7918 seq_cst ? MEMMODEL_SEQ_CST
7919 : MEMMODEL_RELAXED));
7921 if (need_old || need_new)
7923 lhs = need_old ? loaded_val : stored_val;
7924 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7925 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7927 else
7928 call = fold_convert_loc (loc, void_type_node, call);
7929 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7930 gsi_remove (&gsi, true);
7932 gsi = gsi_last_bb (store_bb);
7933 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7934 gsi_remove (&gsi, true);
7935 gsi = gsi_last_bb (store_bb);
7936 gsi_remove (&gsi, true);
7938 if (gimple_in_ssa_p (cfun))
7939 update_ssa (TODO_update_ssa_no_phi);
7941 return true;
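/* For illustration (sizes simplified), a user-level

	#pragma omp atomic
	*p += v;

   matches the PLUS_EXPR case above and is expanded into roughly

	__atomic_fetch_add_4 (p, v, MEMMODEL_RELAXED);

   using the __atomic_add_fetch_4 form instead when the new value is
   needed by a capture clause.  */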
7944 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7946 oldval = *addr;
7947 repeat:
7948 newval = rhs; // with oldval replacing *addr in rhs
7949 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
7950 if (oldval != newval)
7951 goto repeat;
7953 INDEX is log2 of the size of the data type, and thus usable to find the
7954 index of the builtin decl. */
7956 static bool
7957 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7958 tree addr, tree loaded_val, tree stored_val,
7959 int index)
7961 tree loadedi, storedi, initial, new_storedi, old_vali;
7962 tree type, itype, cmpxchg, iaddr;
7963 gimple_stmt_iterator si;
7964 basic_block loop_header = single_succ (load_bb);
7965 gimple phi, stmt;
7966 edge e;
7967 enum built_in_function fncode;
7969 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7970 order to use the RELAXED memory model effectively. */
7971 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7972 + index + 1);
7973 cmpxchg = builtin_decl_explicit (fncode);
7974 if (cmpxchg == NULL_TREE)
7975 return false;
7976 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7977 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7979 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7980 return false;
7982 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7983 si = gsi_last_bb (load_bb);
7984 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7986 /* For floating-point values, we'll need to view-convert them to integers
7987 so that we can perform the atomic compare and swap. Simplify the
7988 following code by always setting up the "i"ntegral variables. */
7989 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7991 tree iaddr_val;
7993 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7994 true), NULL);
7995 iaddr_val
7996 = force_gimple_operand_gsi (&si,
7997 fold_convert (TREE_TYPE (iaddr), addr),
7998 false, NULL_TREE, true, GSI_SAME_STMT);
7999 stmt = gimple_build_assign (iaddr, iaddr_val);
8000 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8001 loadedi = create_tmp_var (itype, NULL);
8002 if (gimple_in_ssa_p (cfun))
8003 loadedi = make_ssa_name (loadedi, NULL);
8005 else
8007 iaddr = addr;
8008 loadedi = loaded_val;
8011 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
8012 tree loaddecl = builtin_decl_explicit (fncode);
8013 if (loaddecl)
8014 initial
8015 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
8016 build_call_expr (loaddecl, 2, iaddr,
8017 build_int_cst (NULL_TREE,
8018 MEMMODEL_RELAXED)));
8019 else
8020 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
8021 build_int_cst (TREE_TYPE (iaddr), 0));
8023 initial
8024 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
8025 GSI_SAME_STMT);
8027 /* Move the value to the LOADEDI temporary. */
8028 if (gimple_in_ssa_p (cfun))
8030 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
8031 phi = create_phi_node (loadedi, loop_header);
8032 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
8033 initial);
8035 else
8036 gsi_insert_before (&si,
8037 gimple_build_assign (loadedi, initial),
8038 GSI_SAME_STMT);
8039 if (loadedi != loaded_val)
8041 gimple_stmt_iterator gsi2;
8042 tree x;
8044 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
8045 gsi2 = gsi_start_bb (loop_header);
8046 if (gimple_in_ssa_p (cfun))
8048 gimple_assign stmt;
8049 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8050 true, GSI_SAME_STMT);
8051 stmt = gimple_build_assign (loaded_val, x);
8052 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
8054 else
8056 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
8057 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8058 true, GSI_SAME_STMT);
8061 gsi_remove (&si, true);
8063 si = gsi_last_bb (store_bb);
8064 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8066 if (iaddr == addr)
8067 storedi = stored_val;
8068 else
8069 storedi =
8070 force_gimple_operand_gsi (&si,
8071 build1 (VIEW_CONVERT_EXPR, itype,
8072 stored_val), true, NULL_TREE, true,
8073 GSI_SAME_STMT);
8075 /* Build the compare&swap statement. */
8076 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
8077 new_storedi = force_gimple_operand_gsi (&si,
8078 fold_convert (TREE_TYPE (loadedi),
8079 new_storedi),
8080 true, NULL_TREE,
8081 true, GSI_SAME_STMT);
8083 if (gimple_in_ssa_p (cfun))
8084 old_vali = loadedi;
8085 else
8087 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
8088 stmt = gimple_build_assign (old_vali, loadedi);
8089 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8091 stmt = gimple_build_assign (loadedi, new_storedi);
8092 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8095 /* Note that we always perform the comparison as an integer, even for
8096 floating point. This allows the atomic operation to properly
8097 succeed even with NaNs and -0.0. */
8098 stmt = gimple_build_cond_empty
8099 (build2 (NE_EXPR, boolean_type_node,
8100 new_storedi, old_vali));
8101 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8103 /* Update cfg. */
8104 e = single_succ_edge (store_bb);
8105 e->flags &= ~EDGE_FALLTHRU;
8106 e->flags |= EDGE_FALSE_VALUE;
8108 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
8110 /* Copy the new value to loadedi (we already did that before the condition
8111 if we are not in SSA). */
8112 if (gimple_in_ssa_p (cfun))
8114 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
8115 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
8118 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8119 gsi_remove (&si, true);
8121 struct loop *loop = alloc_loop ();
8122 loop->header = loop_header;
8123 loop->latch = store_bb;
8124 add_loop (loop, loop_header->loop_father);
8126 if (gimple_in_ssa_p (cfun))
8127 update_ssa (TODO_update_ssa_no_phi);
8129 return true;
8132 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8134 GOMP_atomic_start ();
8135 *addr = rhs;
8136 GOMP_atomic_end ();
8138 The result is not globally atomic, but works so long as all parallel
8139 references are within #pragma omp atomic directives. According to
8140 responses received from omp@openmp.org, this appears to be within spec.
8141 That makes sense, since it is how several other compilers handle
8142 this situation as well.
8143 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8144 expanding. STORED_VAL is the operand of the matching
8145 GIMPLE_OMP_ATOMIC_STORE.
8147 We replace
8148 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8149 loaded_val = *addr;
8151 and replace
8152 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8153 *addr = stored_val;
8156 static bool
8157 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
8158 tree addr, tree loaded_val, tree stored_val)
8160 gimple_stmt_iterator si;
8161 gimple_assign stmt;
8162 tree t;
8164 si = gsi_last_bb (load_bb);
8165 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8167 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
8168 t = build_call_expr (t, 0);
8169 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8171 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
8172 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8173 gsi_remove (&si, true);
8175 si = gsi_last_bb (store_bb);
8176 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8178 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
8179 stored_val);
8180 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8182 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
8183 t = build_call_expr (t, 0);
8184 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8185 gsi_remove (&si, true);
8187 if (gimple_in_ssa_p (cfun))
8188 update_ssa (TODO_update_ssa_no_phi);
8189 return true;
8192 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
8193 using expand_omp_atomic_fetch_op. If that fails, we try
8194 expand_omp_atomic_pipeline, and if that fails too, the
8195 ultimate fallback is wrapping the operation in a mutex
8196 (expand_omp_atomic_mutex). REGION is the atomic region built
8197 by build_omp_regions_1(). */
8199 static void
8200 expand_omp_atomic (struct omp_region *region)
8202 basic_block load_bb = region->entry, store_bb = region->exit;
8203 gimple_omp_atomic_load load =
8204 as_a <gimple_omp_atomic_load> (last_stmt (load_bb));
8205 gimple_omp_atomic_store store =
8206 as_a <gimple_omp_atomic_store> (last_stmt (store_bb));
8207 tree loaded_val = gimple_omp_atomic_load_lhs (load);
8208 tree addr = gimple_omp_atomic_load_rhs (load);
8209 tree stored_val = gimple_omp_atomic_store_val (store);
8210 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8211 HOST_WIDE_INT index;
8213 /* Make sure the type is one of the supported sizes. */
8214 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
8215 index = exact_log2 (index);
8216 if (index >= 0 && index <= 4)
8218 unsigned int align = TYPE_ALIGN_UNIT (type);
8220 /* __sync builtins require strict data alignment. */
8221 if (exact_log2 (align) >= index)
8223 /* Atomic load. */
8224 if (loaded_val == stored_val
8225 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8226 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8227 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8228 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
8229 return;
8231 /* Atomic store. */
8232 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8233 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8234 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8235 && store_bb == single_succ (load_bb)
8236 && first_stmt (store_bb) == store
8237 && expand_omp_atomic_store (load_bb, addr, loaded_val,
8238 stored_val, index))
8239 return;
8241 /* When possible, use specialized atomic update functions. */
8242 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
8243 && store_bb == single_succ (load_bb)
8244 && expand_omp_atomic_fetch_op (load_bb, addr,
8245 loaded_val, stored_val, index))
8246 return;
8248 /* If we don't have specialized __sync builtins, try to implement
8249 the operation as a compare-and-swap loop. */
8250 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
8251 loaded_val, stored_val, index))
8252 return;
8256 /* The ultimate fallback is wrapping the operation in a mutex. */
8257 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
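/* For illustration, with a 4-byte int INDEX is exact_log2 (4) == 2, so
   the dispatch above tries __atomic_load_4/__atomic_store_4, then the
   __atomic_fetch_*_4 builtins, then a compare-and-swap loop, and only
   then falls back to the GOMP_atomic_start/GOMP_atomic_end mutex.  */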
8261 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
8263 static void
8264 expand_omp_target (struct omp_region *region)
8266 basic_block entry_bb, exit_bb, new_bb;
8267 struct function *child_cfun = NULL;
8268 tree child_fn = NULL_TREE, block, t;
8269 gimple_stmt_iterator gsi;
8270 gimple_omp_target entry_stmt;
8271 gimple stmt;
8272 edge e;
8274 entry_stmt = as_a <gimple_omp_target> (last_stmt (region->entry));
8275 new_bb = region->entry;
8276 int kind = gimple_omp_target_kind (entry_stmt);
8277 if (kind == GF_OMP_TARGET_KIND_REGION)
8279 child_fn = gimple_omp_target_child_fn (entry_stmt);
8280 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
8283 entry_bb = region->entry;
8284 exit_bb = region->exit;
8286 if (kind == GF_OMP_TARGET_KIND_REGION)
8288 unsigned srcidx, dstidx, num;
8290 /* If the target region needs data sent from the parent
8291 function, then the very first statement (except possible
8292 tree profile counter updates) of the parallel body
8293 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8294 &.OMP_DATA_O is passed as an argument to the child function,
8295 we need to replace it with the argument as seen by the child
8296 function.
8298 In most cases, this will end up being the identity assignment
8299 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
8300 a function call that has been inlined, the original PARM_DECL
8301 .OMP_DATA_I may have been converted into a different local
8302 variable. In that case, we need to keep the assignment. */
8303 if (gimple_omp_target_data_arg (entry_stmt))
8305 basic_block entry_succ_bb = single_succ (entry_bb);
8306 gimple_stmt_iterator gsi;
8307 tree arg;
8308 gimple tgtcopy_stmt = NULL;
8309 tree sender
8310 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
8312 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
8314 gcc_assert (!gsi_end_p (gsi));
8315 stmt = gsi_stmt (gsi);
8316 if (gimple_code (stmt) != GIMPLE_ASSIGN)
8317 continue;
8319 if (gimple_num_ops (stmt) == 2)
8321 tree arg = gimple_assign_rhs1 (stmt);
8323 /* We're ignoring the subcode because we're
8324 effectively doing a STRIP_NOPS. */
8326 if (TREE_CODE (arg) == ADDR_EXPR
8327 && TREE_OPERAND (arg, 0) == sender)
8329 tgtcopy_stmt = stmt;
8330 break;
8335 gcc_assert (tgtcopy_stmt != NULL);
8336 arg = DECL_ARGUMENTS (child_fn);
8338 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
8339 gsi_remove (&gsi, true);
8342 /* Declare local variables needed in CHILD_CFUN. */
8343 block = DECL_INITIAL (child_fn);
8344 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
8345 /* The gimplifier could record temporaries in the target block
8346 rather than in the containing function's local_decls chain,
8347 which would mean cgraph would miss finalizing them. Do it now. */
8348 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
8349 if (TREE_CODE (t) == VAR_DECL
8350 && TREE_STATIC (t)
8351 && !DECL_EXTERNAL (t))
8352 varpool_node::finalize_decl (t);
8353 DECL_SAVED_TREE (child_fn) = NULL;
8354 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8355 gimple_set_body (child_fn, NULL);
8356 TREE_USED (block) = 1;
8358 /* Reset DECL_CONTEXT on function arguments. */
8359 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
8360 DECL_CONTEXT (t) = child_fn;
8362 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
8363 so that it can be moved to the child function. */
8364 gsi = gsi_last_bb (entry_bb);
8365 stmt = gsi_stmt (gsi);
8366 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
8367 && gimple_omp_target_kind (stmt)
8368 == GF_OMP_TARGET_KIND_REGION);
8369 gsi_remove (&gsi, true);
8370 e = split_block (entry_bb, stmt);
8371 entry_bb = e->dest;
8372 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8374 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8375 if (exit_bb)
8377 gsi = gsi_last_bb (exit_bb);
8378 gcc_assert (!gsi_end_p (gsi)
8379 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
8380 stmt = gimple_build_return (NULL);
8381 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8382 gsi_remove (&gsi, true);
8385 /* Move the target region into CHILD_CFUN. */
8387 block = gimple_block (entry_stmt);
8389 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8390 if (exit_bb)
8391 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8392 /* When the OMP expansion process cannot guarantee an up-to-date
8393 loop tree, arrange for the child function to fix up its loops. */
8394 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8395 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8397 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8398 num = vec_safe_length (child_cfun->local_decls);
8399 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8401 t = (*child_cfun->local_decls)[srcidx];
8402 if (DECL_CONTEXT (t) == cfun->decl)
8403 continue;
8404 if (srcidx != dstidx)
8405 (*child_cfun->local_decls)[dstidx] = t;
8406 dstidx++;
8408 if (dstidx != num)
8409 vec_safe_truncate (child_cfun->local_decls, dstidx);
8411 /* Inform the callgraph about the new function. */
8412 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8413 cgraph_node::add_new_function (child_fn, true);
8415 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8416 fixed in a following pass. */
8417 push_cfun (child_cfun);
8418 cgraph_edge::rebuild_edges ();
8420 /* Some EH regions might become dead, see PR34608. If
8421 pass_cleanup_cfg isn't the first pass to happen with the
8422 new child, these dead EH edges might cause problems.
8423 Clean them up now. */
8424 if (flag_exceptions)
8426 basic_block bb;
8427 bool changed = false;
8429 FOR_EACH_BB_FN (bb, cfun)
8430 changed |= gimple_purge_dead_eh_edges (bb);
8431 if (changed)
8432 cleanup_tree_cfg ();
8434 pop_cfun ();
8437 /* Emit a library call to launch the target region, or do data
8438 transfers. */
8439 tree t1, t2, t3, t4, device, cond, c, clauses;
8440 enum built_in_function start_ix;
8441 location_t clause_loc;
8443 clauses = gimple_omp_target_clauses (entry_stmt);
8445 if (kind == GF_OMP_TARGET_KIND_REGION)
8446 start_ix = BUILT_IN_GOMP_TARGET;
8447 else if (kind == GF_OMP_TARGET_KIND_DATA)
8448 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8449 else
8450 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8452 /* By default, the value of DEVICE is -1 (let the runtime library choose)
8453 and there is no conditional. */
8454 cond = NULL_TREE;
8455 device = build_int_cst (integer_type_node, -1);
8457 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8458 if (c)
8459 cond = OMP_CLAUSE_IF_EXPR (c);
8461 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8462 if (c)
8464 device = OMP_CLAUSE_DEVICE_ID (c);
8465 clause_loc = OMP_CLAUSE_LOCATION (c);
8467 else
8468 clause_loc = gimple_location (entry_stmt);
8470 /* Ensure 'device' is of the correct type. */
8471 device = fold_convert_loc (clause_loc, integer_type_node, device);
8473 /* If we found the clause 'if (cond)', build
8474 (cond ? device : -2). */
8475 if (cond)
8477 cond = gimple_boolify (cond);
8479 basic_block cond_bb, then_bb, else_bb;
8480 edge e;
8481 tree tmp_var;
8483 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
8484 if (kind != GF_OMP_TARGET_KIND_REGION)
8486 gsi = gsi_last_bb (new_bb);
8487 gsi_prev (&gsi);
8488 e = split_block (new_bb, gsi_stmt (gsi));
8490 else
8491 e = split_block (new_bb, NULL);
8492 cond_bb = e->src;
8493 new_bb = e->dest;
8494 remove_edge (e);
8496 then_bb = create_empty_bb (cond_bb);
8497 else_bb = create_empty_bb (then_bb);
8498 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8499 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8501 stmt = gimple_build_cond_empty (cond);
8502 gsi = gsi_last_bb (cond_bb);
8503 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8505 gsi = gsi_start_bb (then_bb);
8506 stmt = gimple_build_assign (tmp_var, device);
8507 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8509 gsi = gsi_start_bb (else_bb);
8510 stmt = gimple_build_assign (tmp_var,
8511 build_int_cst (integer_type_node, -2));
8512 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8514 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8515 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8516 add_bb_to_loop (then_bb, cond_bb->loop_father);
8517 add_bb_to_loop (else_bb, cond_bb->loop_father);
8518 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8519 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8521 device = tmp_var;
8524 gsi = gsi_last_bb (new_bb);
8525 t = gimple_omp_target_data_arg (entry_stmt);
8526 if (t == NULL)
8528 t1 = size_zero_node;
8529 t2 = build_zero_cst (ptr_type_node);
8530 t3 = t2;
8531 t4 = t2;
8533 else
8535 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8536 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8537 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8538 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8539 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8542 gimple g;
8543 /* FIXME: This will be the address of the
8544 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8545 symbol, as soon as the linker plugin is able to create it for us. */
8546 tree openmp_target = build_zero_cst (ptr_type_node);
8547 if (kind == GF_OMP_TARGET_KIND_REGION)
8549 tree fnaddr = build_fold_addr_expr (child_fn);
8550 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8551 device, fnaddr, openmp_target, t1, t2, t3, t4);
8553 else
8554 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8555 device, openmp_target, t1, t2, t3, t4);
8556 gimple_set_location (g, gimple_location (entry_stmt));
8557 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8558 if (kind != GF_OMP_TARGET_KIND_REGION)
8560 g = gsi_stmt (gsi);
8561 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8562 gsi_remove (&gsi, true);
8564 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8566 gsi = gsi_last_bb (region->exit);
8567 g = gsi_stmt (gsi);
8568 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8569 gsi_remove (&gsi, true);
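/* For illustration (names hypothetical, mapping vectors simplified),
   a user-level

	#pragma omp target map(tofrom: a)
	a++;

   is expanded above into roughly

	GOMP_target (-1, child_fn, __OPENMP_TARGET__,
		     mapnum, hostaddrs, sizes, kinds);

   where -1 lets the runtime choose a device and the last four arguments
   describe the data to be mapped.  */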
8574 /* Expand the parallel region tree rooted at REGION. Expansion
8575 proceeds in depth-first order. Innermost regions are expanded
8576 first. This way, parallel regions that require a new function to
8577 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8578 internal dependencies in their body. */
8580 static void
8581 expand_omp (struct omp_region *region)
8583 while (region)
8585 location_t saved_location;
8586 gimple inner_stmt = NULL;
8588 /* First, determine whether this is a combined parallel+workshare
8589 region. */
8590 if (region->type == GIMPLE_OMP_PARALLEL)
8591 determine_parallel_type (region);
8593 if (region->type == GIMPLE_OMP_FOR
8594 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8595 inner_stmt = last_stmt (region->inner->entry);
8597 if (region->inner)
8598 expand_omp (region->inner);
8600 saved_location = input_location;
8601 if (gimple_has_location (last_stmt (region->entry)))
8602 input_location = gimple_location (last_stmt (region->entry));
8604 switch (region->type)
8606 case GIMPLE_OMP_PARALLEL:
8607 case GIMPLE_OMP_TASK:
8608 expand_omp_taskreg (region);
8609 break;
8611 case GIMPLE_OMP_FOR:
8612 expand_omp_for (region, inner_stmt);
8613 break;
8615 case GIMPLE_OMP_SECTIONS:
8616 expand_omp_sections (region);
8617 break;
8619 case GIMPLE_OMP_SECTION:
8620 /* Individual omp sections are handled together with their
8621 parent GIMPLE_OMP_SECTIONS region. */
8622 break;
8624 case GIMPLE_OMP_SINGLE:
8625 expand_omp_single (region);
8626 break;
8628 case GIMPLE_OMP_MASTER:
8629 case GIMPLE_OMP_TASKGROUP:
8630 case GIMPLE_OMP_ORDERED:
8631 case GIMPLE_OMP_CRITICAL:
8632 case GIMPLE_OMP_TEAMS:
8633 expand_omp_synch (region);
8634 break;
8636 case GIMPLE_OMP_ATOMIC_LOAD:
8637 expand_omp_atomic (region);
8638 break;
8640 case GIMPLE_OMP_TARGET:
8641 expand_omp_target (region);
8642 break;
8644 default:
8645 gcc_unreachable ();
8648 input_location = saved_location;
8649 region = region->next;
8654 /* Helper for build_omp_regions. Scan the dominator tree starting at
8655 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8656 true, the function ends once a single tree is built (otherwise, a
8657 whole forest of OMP constructs may be built). */
8659 static void
8660 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8661 bool single_tree)
8663 gimple_stmt_iterator gsi;
8664 gimple stmt;
8665 basic_block son;
8667 gsi = gsi_last_bb (bb);
8668 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8670 struct omp_region *region;
8671 enum gimple_code code;
8673 stmt = gsi_stmt (gsi);
8674 code = gimple_code (stmt);
8675 if (code == GIMPLE_OMP_RETURN)
8677 /* STMT is the return point out of region PARENT. Mark it
8678 as the exit point and make PARENT the immediately
8679 enclosing region. */
8680 gcc_assert (parent);
8681 region = parent;
8682 region->exit = bb;
8683 parent = parent->outer;
8685 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8687 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8688 GIMPLE_OMP_RETURN, but matches with
8689 GIMPLE_OMP_ATOMIC_LOAD. */
8690 gcc_assert (parent);
8691 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8692 region = parent;
8693 region->exit = bb;
8694 parent = parent->outer;
8697 else if (code == GIMPLE_OMP_CONTINUE)
8699 gcc_assert (parent);
8700 parent->cont = bb;
8702 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8704 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8705 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8708 else if (code == GIMPLE_OMP_TARGET
8709 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8710 new_omp_region (bb, code, parent);
8711 else
8713 /* Otherwise, this directive becomes the parent for a new
8714 region. */
8715 region = new_omp_region (bb, code, parent);
8716 parent = region;
8720 if (single_tree && !parent)
8721 return;
8723 for (son = first_dom_son (CDI_DOMINATORS, bb);
8724 son;
8725 son = next_dom_son (CDI_DOMINATORS, son))
8726 build_omp_regions_1 (son, parent, single_tree);
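/* For illustration, a function body containing

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   yields a region tree whose root is the GIMPLE_OMP_PARALLEL region with
   the GIMPLE_OMP_FOR region as its single inner child; each region's
   entry and exit fields point at the blocks holding the directive and
   its matching GIMPLE_OMP_RETURN.  */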
8729 /* Builds the tree of OMP regions rooted at ROOT, storing it to
8730 root_omp_region. */
8732 static void
8733 build_omp_regions_root (basic_block root)
8735 gcc_assert (root_omp_region == NULL);
8736 build_omp_regions_1 (root, NULL, true);
8737 gcc_assert (root_omp_region != NULL);
8740 /* Expand the OMP construct (and its subconstructs) starting in HEAD. */
8742 void
8743 omp_expand_local (basic_block head)
8745 build_omp_regions_root (head);
8746 if (dump_file && (dump_flags & TDF_DETAILS))
8748 fprintf (dump_file, "\nOMP region tree\n\n");
8749 dump_omp_region (dump_file, root_omp_region, 0);
8750 fprintf (dump_file, "\n");
8753 remove_exit_barriers (root_omp_region);
8754 expand_omp (root_omp_region);
8756 free_omp_regions ();
8759 /* Scan the CFG and build a tree of OMP regions, storing it in
8760 root_omp_region. */
8762 static void
8763 build_omp_regions (void)
8765 gcc_assert (root_omp_region == NULL);
8766 calculate_dominance_info (CDI_DOMINATORS);
8767 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8770 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8772 static unsigned int
8773 execute_expand_omp (void)
8775 build_omp_regions ();
8777 if (!root_omp_region)
8778 return 0;
8780 if (dump_file)
8782 fprintf (dump_file, "\nOMP region tree\n\n");
8783 dump_omp_region (dump_file, root_omp_region, 0);
8784 fprintf (dump_file, "\n");
8787 remove_exit_barriers (root_omp_region);
8789 expand_omp (root_omp_region);
8791 cleanup_tree_cfg ();
8793 free_omp_regions ();
8795 return 0;
8798 /* OMP expansion -- the default pass, run before creation of SSA form. */
8800 namespace {
8802 const pass_data pass_data_expand_omp =
8804 GIMPLE_PASS, /* type */
8805 "ompexp", /* name */
8806 OPTGROUP_NONE, /* optinfo_flags */
8807 TV_NONE, /* tv_id */
8808 PROP_gimple_any, /* properties_required */
8809 0, /* properties_provided */
8810 0, /* properties_destroyed */
8811 0, /* todo_flags_start */
8812 0, /* todo_flags_finish */
8815 class pass_expand_omp : public gimple_opt_pass
8817 public:
8818 pass_expand_omp (gcc::context *ctxt)
8819 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8822 /* opt_pass methods: */
8823 virtual bool gate (function *)
8825 return ((flag_openmp != 0 || flag_openmp_simd != 0
8826 || flag_cilkplus != 0) && !seen_error ());
8829 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8831 }; // class pass_expand_omp
8833 } // anon namespace
8835 gimple_opt_pass *
8836 make_pass_expand_omp (gcc::context *ctxt)
8838 return new pass_expand_omp (ctxt);
8841 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8843 /* If CTX is a worksharing context inside a cancellable parallel
8844 region and it isn't nowait, add an LHS to its GIMPLE_OMP_RETURN
8845 and a conditional branch to the parallel's cancel_label to handle
8846 cancellation in the implicit barrier. */
8848 static void
8849 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8851 gimple omp_return = gimple_seq_last_stmt (*body);
8852 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8853 if (gimple_omp_return_nowait_p (omp_return))
8854 return;
8855 if (ctx->outer
8856 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8857 && ctx->outer->cancellable)
8859 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8860 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8861 tree lhs = create_tmp_var (c_bool_type, NULL);
8862 gimple_omp_return_set_lhs (omp_return, lhs);
8863 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8864 gimple g = gimple_build_cond (NE_EXPR, lhs,
8865 fold_convert (c_bool_type,
8866 boolean_false_node),
8867 ctx->outer->cancel_label, fallthru_label);
8868 gimple_seq_add_stmt (body, g);
8869 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
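/* For illustration (label names hypothetical), after the transformation
   above the end of a cancellable worksharing body looks roughly like

	GIMPLE_OMP_RETURN (lhs)
	if (lhs != 0) goto cancel_label; else goto fallthru_label;
	fallthru_label:

   where LHS receives the result of the cancellation check performed in
   the implicit barrier.  */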
8873 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8874 CTX is the enclosing OMP context for the current statement. */
8876 static void
8877 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8879 tree block, control;
8880 gimple_stmt_iterator tgsi;
8881 gimple_omp_sections stmt;
8882 gimple t;
8883 gimple_bind new_stmt, bind;
8884 gimple_seq ilist, dlist, olist, new_body;
8886 stmt = as_a <gimple_omp_sections> (gsi_stmt (*gsi_p));
8888 push_gimplify_context ();
8890 dlist = NULL;
8891 ilist = NULL;
8892 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8893 &ilist, &dlist, ctx, NULL);
8895 new_body = gimple_omp_body (stmt);
8896 gimple_omp_set_body (stmt, NULL);
8897 tgsi = gsi_start (new_body);
8898 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8900 omp_context *sctx;
8901 gimple sec_start;
8903 sec_start = gsi_stmt (tgsi);
8904 sctx = maybe_lookup_ctx (sec_start);
8905 gcc_assert (sctx);
8907 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8908 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8909 GSI_CONTINUE_LINKING);
8910 gimple_omp_set_body (sec_start, NULL);
8912 if (gsi_one_before_end_p (tgsi))
8914 gimple_seq l = NULL;
8915 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8916 &l, ctx);
8917 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8918 gimple_omp_section_set_last (sec_start);
8921 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8922 GSI_CONTINUE_LINKING);
8925 block = make_node (BLOCK);
8926 bind = gimple_build_bind (NULL, new_body, block);
8928 olist = NULL;
8929 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8931 block = make_node (BLOCK);
8932 new_stmt = gimple_build_bind (NULL, NULL, block);
8933 gsi_replace (gsi_p, new_stmt, true);
8935 pop_gimplify_context (new_stmt);
8936 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8937 BLOCK_VARS (block) = gimple_bind_vars (bind);
8938 if (BLOCK_VARS (block))
8939 TREE_USED (block) = 1;
8941 new_body = NULL;
8942 gimple_seq_add_seq (&new_body, ilist);
8943 gimple_seq_add_stmt (&new_body, stmt);
8944 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8945 gimple_seq_add_stmt (&new_body, bind);
8947 control = create_tmp_var (unsigned_type_node, ".section");
8948 t = gimple_build_omp_continue (control, control);
8949 gimple_omp_sections_set_control (stmt, control);
8950 gimple_seq_add_stmt (&new_body, t);
8952 gimple_seq_add_seq (&new_body, olist);
8953 if (ctx->cancellable)
8954 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8955 gimple_seq_add_seq (&new_body, dlist);
8957 new_body = maybe_catch_exception (new_body);
8959 t = gimple_build_omp_return
8960 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8961 OMP_CLAUSE_NOWAIT));
8962 gimple_seq_add_stmt (&new_body, t);
8963 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8965 gimple_bind_set_body (new_stmt, new_body);
8969 /* A subroutine of lower_omp_single. Expand the simple form of
8970 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8972 if (GOMP_single_start ())
8973 BODY;
8974 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8976 FIXME. It may be better to delay expanding the logic of this until
8977 pass_expand_omp. The expanded logic may make the job more difficult
8978 for a synchronization analysis pass. */
8980 static void
8981 lower_omp_single_simple (gimple_omp_single single_stmt, gimple_seq *pre_p)
8983 location_t loc = gimple_location (single_stmt);
8984 tree tlabel = create_artificial_label (loc);
8985 tree flabel = create_artificial_label (loc);
8986 gimple call, cond;
8987 tree lhs, decl;
8989 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8990 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8991 call = gimple_build_call (decl, 0);
8992 gimple_call_set_lhs (call, lhs);
8993 gimple_seq_add_stmt (pre_p, call);
8995 cond = gimple_build_cond (EQ_EXPR, lhs,
8996 fold_convert_loc (loc, TREE_TYPE (lhs),
8997 boolean_true_node),
8998 tlabel, flabel);
8999 gimple_seq_add_stmt (pre_p, cond);
9000 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
9001 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
9002 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
9006 /* A subroutine of lower_omp_single. Expand the form of
9007 a GIMPLE_OMP_SINGLE that has a copyprivate clause:
9009 #pragma omp single copyprivate (a, b, c)
9011 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
9014 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
9015 {
9016 BODY;
9017 copyout.a = a;
9018 copyout.b = b;
9019 copyout.c = c;
9020 GOMP_single_copy_end (&copyout);
9021 }
9022 else
9023 {
9024 a = copyout_p->a;
9025 b = copyout_p->b;
9026 c = copyout_p->c;
9027 }
9028 GOMP_barrier ();
9031 FIXME. It may be better to delay expanding the logic of this until
9032 pass_expand_omp. The expanded logic may make the job more difficult
9033 for a synchronization analysis pass. */
9035 static void
9036 lower_omp_single_copy (gimple_omp_single single_stmt, gimple_seq *pre_p,
9037 omp_context *ctx)
9039 tree ptr_type, t, l0, l1, l2, bfn_decl;
9040 gimple_seq copyin_seq;
9041 location_t loc = gimple_location (single_stmt);
9043 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
9045 ptr_type = build_pointer_type (ctx->record_type);
9046 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
9048 l0 = create_artificial_label (loc);
9049 l1 = create_artificial_label (loc);
9050 l2 = create_artificial_label (loc);
9052 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
9053 t = build_call_expr_loc (loc, bfn_decl, 0);
9054 t = fold_convert_loc (loc, ptr_type, t);
9055 gimplify_assign (ctx->receiver_decl, t, pre_p);
9057 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
9058 build_int_cst (ptr_type, 0));
9059 t = build3 (COND_EXPR, void_type_node, t,
9060 build_and_jump (&l0), build_and_jump (&l1));
9061 gimplify_and_add (t, pre_p);
9063 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
9065 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
9067 copyin_seq = NULL;
9068 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
9069 &copyin_seq, ctx);
9071 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9072 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
9073 t = build_call_expr_loc (loc, bfn_decl, 1, t);
9074 gimplify_and_add (t, pre_p);
9076 t = build_and_jump (&l2);
9077 gimplify_and_add (t, pre_p);
9079 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
9081 gimple_seq_add_seq (pre_p, copyin_seq);
9083 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
9087 /* Lower code for an OpenMP single directive. */
9089 static void
9090 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9092 tree block;
9093 gimple t;
9094 gimple_omp_single single_stmt = as_a <gimple_omp_single> (gsi_stmt (*gsi_p));
9095 gimple_bind bind;
9096 gimple_seq bind_body, bind_body_tail = NULL, dlist;
9098 push_gimplify_context ();
9100 block = make_node (BLOCK);
9101 bind = gimple_build_bind (NULL, NULL, block);
9102 gsi_replace (gsi_p, bind, true);
9103 bind_body = NULL;
9104 dlist = NULL;
9105 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
9106 &bind_body, &dlist, ctx, NULL);
9107 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
9109 gimple_seq_add_stmt (&bind_body, single_stmt);
9111 if (ctx->record_type)
9112 lower_omp_single_copy (single_stmt, &bind_body, ctx);
9113 else
9114 lower_omp_single_simple (single_stmt, &bind_body);
9116 gimple_omp_set_body (single_stmt, NULL);
9118 gimple_seq_add_seq (&bind_body, dlist);
9120 bind_body = maybe_catch_exception (bind_body);
9122 t = gimple_build_omp_return
9123 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
9124 OMP_CLAUSE_NOWAIT));
9125 gimple_seq_add_stmt (&bind_body_tail, t);
9126 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
9127 if (ctx->record_type)
9129 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
9130 tree clobber = build_constructor (ctx->record_type, NULL);
9131 TREE_THIS_VOLATILE (clobber) = 1;
9132 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
9133 clobber), GSI_SAME_STMT);
9135 gimple_seq_add_seq (&bind_body, bind_body_tail);
9136 gimple_bind_set_body (bind, bind_body);
9138 pop_gimplify_context (bind);
9140 gimple_bind_append_vars (bind, ctx->block_vars);
9141 BLOCK_VARS (block) = ctx->block_vars;
9142 if (BLOCK_VARS (block))
9143 TREE_USED (block) = 1;
9147 /* Lower code for an OpenMP master directive. */
9149 static void
9150 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9152 tree block, lab = NULL, x, bfn_decl;
9153 gimple stmt = gsi_stmt (*gsi_p);
9154 gimple_bind bind;
9155 location_t loc = gimple_location (stmt);
9156 gimple_seq tseq;
9158 push_gimplify_context ();
9160 block = make_node (BLOCK);
9161 bind = gimple_build_bind (NULL, NULL, block);
9162 gsi_replace (gsi_p, bind, true);
9163 gimple_bind_add_stmt (bind, stmt);
9165 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9166 x = build_call_expr_loc (loc, bfn_decl, 0);
9167 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
9168 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
9169 tseq = NULL;
9170 gimplify_and_add (x, &tseq);
9171 gimple_bind_add_seq (bind, tseq);
9173 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9174 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9175 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9176 gimple_omp_set_body (stmt, NULL);
9178 gimple_bind_add_stmt (bind, gimple_build_label (lab));
9180 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9182 pop_gimplify_context (bind);
9184 gimple_bind_append_vars (bind, ctx->block_vars);
9185 BLOCK_VARS (block) = ctx->block_vars;
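/* For illustration, the master lowering above produces roughly

	if (omp_get_thread_num () != 0) goto lab;
	BODY;
	lab:
	GIMPLE_OMP_RETURN (nowait)

   so only the master thread executes the body and no barrier is
   implied.  */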
9189 /* Lower code for an OpenMP taskgroup directive. */
9191 static void
9192 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9194 gimple stmt = gsi_stmt (*gsi_p);
9195 gimple_call x;
9196 gimple_bind bind;
9197 tree block = make_node (BLOCK);
9199 bind = gimple_build_bind (NULL, NULL, block);
9200 gsi_replace (gsi_p, bind, true);
9201 gimple_bind_add_stmt (bind, stmt);
9203 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
9205 gimple_bind_add_stmt (bind, x);
9207 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9208 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9209 gimple_omp_set_body (stmt, NULL);
9211 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9213 gimple_bind_append_vars (bind, ctx->block_vars);
9214 BLOCK_VARS (block) = ctx->block_vars;
9218 /* Lower code for an OpenMP ordered directive. */
9220 static void
9221 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9223 tree block;
9224 gimple stmt = gsi_stmt (*gsi_p);
9225 gimple_call x;
9226 gimple_bind bind;
9228 push_gimplify_context ();
9230 block = make_node (BLOCK);
9231 bind = gimple_build_bind (NULL, NULL, block);
9232 gsi_replace (gsi_p, bind, true);
9233 gimple_bind_add_stmt (bind, stmt);
9235 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
9237 gimple_bind_add_stmt (bind, x);
9239 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9240 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9241 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9242 gimple_omp_set_body (stmt, NULL);
9244 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
9245 gimple_bind_add_stmt (bind, x);
9247 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9249 pop_gimplify_context (bind);
9251 gimple_bind_append_vars (bind, ctx->block_vars);
9252 BLOCK_VARS (block) = gimple_bind_vars (bind);
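/* For illustration, the ordered lowering above wraps the body as

	GOMP_ordered_start ();
	BODY;
	GOMP_ordered_end ();

   relying on the enclosing loop's schedule to sequence the calls across
   threads.  */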
9256 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
9257 substitution of a couple of function calls. The NAMED case, however,
9258 requires that languages coordinate a symbol name. It is therefore
9259 best put here in common code. */
9261 static GTY((param1_is (tree), param2_is (tree)))
9262 splay_tree critical_name_mutexes;
9264 static void
9265 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9267 tree block;
9268 tree name, lock, unlock;
9269 gimple_omp_critical stmt = as_a <gimple_omp_critical> (gsi_stmt (*gsi_p));
9270 gimple_bind bind;
9271 location_t loc = gimple_location (stmt);
9272 gimple_seq tbody;
9274 name = gimple_omp_critical_name (stmt);
9275 if (name)
9277 tree decl;
9278 splay_tree_node n;
9280 if (!critical_name_mutexes)
9281 critical_name_mutexes
9282 = splay_tree_new_ggc (splay_tree_compare_pointers,
9283 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9284 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9286 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
9287 if (n == NULL)
9289 char *new_str;
9291 decl = create_tmp_var_raw (ptr_type_node, NULL);
9293 new_str = ACONCAT ((".gomp_critical_user_",
9294 IDENTIFIER_POINTER (name), NULL));
9295 DECL_NAME (decl) = get_identifier (new_str);
9296 TREE_PUBLIC (decl) = 1;
9297 TREE_STATIC (decl) = 1;
9298 DECL_COMMON (decl) = 1;
9299 DECL_ARTIFICIAL (decl) = 1;
9300 DECL_IGNORED_P (decl) = 1;
9301 varpool_node::finalize_decl (decl);
9303 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
9304 (splay_tree_value) decl);
9306 else
9307 decl = (tree) n->value;
9309 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
9310 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
9312 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
9313 unlock = build_call_expr_loc (loc, unlock, 1,
9314 build_fold_addr_expr_loc (loc, decl));
9316 else
9318 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
9319 lock = build_call_expr_loc (loc, lock, 0);
9321 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
9322 unlock = build_call_expr_loc (loc, unlock, 0);
9325 push_gimplify_context ();
9327 block = make_node (BLOCK);
9328 bind = gimple_build_bind (NULL, NULL, block);
9329 gsi_replace (gsi_p, bind, true);
9330 gimple_bind_add_stmt (bind, stmt);
9332 tbody = gimple_bind_body (bind);
9333 gimplify_and_add (lock, &tbody);
9334 gimple_bind_set_body (bind, tbody);
9336 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9337 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9338 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9339 gimple_omp_set_body (stmt, NULL);
9341 tbody = gimple_bind_body (bind);
9342 gimplify_and_add (unlock, &tbody);
9343 gimple_bind_set_body (bind, tbody);
9345 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9347 pop_gimplify_context (bind);
9348 gimple_bind_append_vars (bind, ctx->block_vars);
9349 BLOCK_VARS (block) = gimple_bind_vars (bind);
9353 /* A subroutine of lower_omp_for. Generate code to emit the predicate
9354 for a lastprivate clause. Given a loop control predicate of (V
9355 cond N2), we gate the clause on (!(V cond N2)). The lowered form
9356 is appended to *DLIST, iterator initialization is appended to
9357 *BODY_P. */
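/* Editor's example, not original source: for

       #pragma omp for lastprivate (x)
       for (V = N1; V < N2; V += STEP) ...

   the copy-out is gated on !(V < N2), i.e. (V >= N2), so only the thread
   whose chunk contained the final iteration executes it.  When STEP is
   known to be +1 or -1, the test is strengthened below to (V == N2). */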
9359 static void
9360 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
9361 gimple_seq *dlist, struct omp_context *ctx)
9363 tree clauses, cond, vinit;
9364 enum tree_code cond_code;
9365 gimple_seq stmts;
9367 cond_code = fd->loop.cond_code;
9368 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
9370 /* When possible, use a strict equality expression. This can let
9371 VRP-type optimizations deduce the value and remove a copy. */
9372 if (tree_fits_shwi_p (fd->loop.step))
9374 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
9375 if (step == 1 || step == -1)
9376 cond_code = EQ_EXPR;
9379 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
9381 clauses = gimple_omp_for_clauses (fd->for_stmt);
9382 stmts = NULL;
9383 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
9384 if (!gimple_seq_empty_p (stmts))
9386 gimple_seq_add_seq (&stmts, *dlist);
9387 *dlist = stmts;
9389 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
9390 vinit = fd->loop.n1;
9391 if (cond_code == EQ_EXPR
9392 && tree_fits_shwi_p (fd->loop.n2)
9393 && ! integer_zerop (fd->loop.n2))
9394 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
9395 else
9396 vinit = unshare_expr (vinit);
9398 /* Initialize the iterator variable, so that threads that don't execute
9399 any iterations don't execute the lastprivate clauses by accident. */
9400 gimplify_assign (fd->loop.v, vinit, body_p);
9405 /* Lower code for an OpenMP loop directive. */
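/* Editor's sketch of the sequence assembled below (approximate):

       <rec-input clauses>  <pre-body>  <lowered bound temporaries>
       <lastprivate guard initialization>
       GIMPLE_OMP_FOR
       <lowered loop body>
       GIMPLE_OMP_CONTINUE (V, V)
       <reduction clauses>  <dlist>
       GIMPLE_OMP_RETURN (nowait)

   all wrapped in a new GIMPLE_BIND that replaces the statement. */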
9407 static void
9408 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9410 tree *rhs_p, block;
9411 struct omp_for_data fd, *fdp = NULL;
9412 gimple_omp_for stmt = as_a <gimple_omp_for> (gsi_stmt (*gsi_p));
9413 gimple_bind new_stmt;
9414 gimple_seq omp_for_body, body, dlist;
9415 size_t i;
9417 push_gimplify_context ();
9419 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
9421 block = make_node (BLOCK);
9422 new_stmt = gimple_build_bind (NULL, NULL, block);
9423 /* Replace at gsi right away, so that 'stmt' is no longer a member
9424 of a sequence, as we're going to add it to a different
9425 one below. */
9426 gsi_replace (gsi_p, new_stmt, true);
9428 /* Move declarations of temporaries out of the loop body before we make
9429 it go away. */
9430 omp_for_body = gimple_omp_body (stmt);
9431 if (!gimple_seq_empty_p (omp_for_body)
9432 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
9434 gimple_bind inner_bind =
9435 as_a <gimple_bind> (gimple_seq_first_stmt (omp_for_body));
9436 tree vars = gimple_bind_vars (inner_bind);
9437 gimple_bind_append_vars (new_stmt, vars);
9438 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
9439 keep them on the inner_bind and its block. */
9440 gimple_bind_set_vars (inner_bind, NULL_TREE);
9441 if (gimple_bind_block (inner_bind))
9442 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
9445 if (gimple_omp_for_combined_into_p (stmt))
9447 extract_omp_for_data (stmt, &fd, NULL);
9448 fdp = &fd;
9450 /* We need two temporaries of fd.iter_type (istart/iend)
9451 and then (fd.collapse - 1) temporaries of the same
9452 type for the count2 ... countN-1 vars if n2 is not constant. */
9453 size_t count = 2;
9454 tree type = fd.iter_type;
9455 if (fd.collapse > 1
9456 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
9457 count += fd.collapse - 1;
9458 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
9459 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
9460 tree clauses = *pc;
9461 if (parallel_for)
9462 outerc
9463 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
9464 OMP_CLAUSE__LOOPTEMP_);
9465 for (i = 0; i < count; i++)
9467 tree temp;
9468 if (parallel_for)
9470 gcc_assert (outerc);
9471 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
9472 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
9473 OMP_CLAUSE__LOOPTEMP_);
9475 else
9477 temp = create_tmp_var (type, NULL);
9478 insert_decl_map (&ctx->outer->cb, temp, temp);
9480 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
9481 OMP_CLAUSE_DECL (*pc) = temp;
9482 pc = &OMP_CLAUSE_CHAIN (*pc);
9484 *pc = clauses;
9487 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9488 dlist = NULL;
9489 body = NULL;
9490 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
9491 fdp);
9492 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
9494 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9496 /* Lower the header expressions. At this point, we can assume that
9497 the header is of the form:
9499 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
9501 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
9502 using the .omp_data_s mapping, if needed. */
9503 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
9505 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
9506 if (!is_gimple_min_invariant (*rhs_p))
9507 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9509 rhs_p = gimple_omp_for_final_ptr (stmt, i);
9510 if (!is_gimple_min_invariant (*rhs_p))
9511 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9513 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
9514 if (!is_gimple_min_invariant (*rhs_p))
9515 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9518 /* Once lowered, extract the bounds and clauses. */
9519 extract_omp_for_data (stmt, &fd, NULL);
9521 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
9523 gimple_seq_add_stmt (&body, stmt);
9524 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
9526 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
9527 fd.loop.v));
9529 /* After the loop, add exit clauses. */
9530 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
9532 if (ctx->cancellable)
9533 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
9535 gimple_seq_add_seq (&body, dlist);
9537 body = maybe_catch_exception (body);
9539 /* Region exit marker goes at the end of the loop body. */
9540 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
9541 maybe_add_implicit_barrier_cancel (ctx, &body);
9542 pop_gimplify_context (new_stmt);
9544 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9545 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
9546 if (BLOCK_VARS (block))
9547 TREE_USED (block) = 1;
9549 gimple_bind_set_body (new_stmt, body);
9550 gimple_omp_set_body (stmt, NULL);
9551 gimple_omp_for_set_pre_body (stmt, NULL);
9554 /* Callback for walk_stmts. Check if the current statement only contains
9555 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
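/* Editor's example, not original source: in

       #pragma omp parallel
       #pragma omp for
       for (...) ...

   the parallel body contains exactly one workshare statement, so *info
   ends up as 1 and the parallel can be marked combined; a second
   workshare, or any other statement, drives *info to -1. */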
9557 static tree
9558 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9559 bool *handled_ops_p,
9560 struct walk_stmt_info *wi)
9562 int *info = (int *) wi->info;
9563 gimple stmt = gsi_stmt (*gsi_p);
9565 *handled_ops_p = true;
9566 switch (gimple_code (stmt))
9568 WALK_SUBSTMTS;
9570 case GIMPLE_OMP_FOR:
9571 case GIMPLE_OMP_SECTIONS:
9572 *info = *info == 0 ? 1 : -1;
9573 break;
9574 default:
9575 *info = -1;
9576 break;
9578 return NULL;
9581 struct omp_taskcopy_context
9583 /* This field must be at the beginning, as we do "inheritance": Some
9584 callback functions for tree-inline.c (e.g., omp_copy_decl)
9585 receive a copy_body_data pointer that is up-casted to an
9586 omp_taskcopy_context pointer. */
9587 copy_body_data cb;
9588 omp_context *ctx;
9591 static tree
9592 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9594 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9596 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9597 return create_tmp_var (TREE_TYPE (var), NULL);
9599 return var;
9602 static tree
9603 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9605 tree name, new_fields = NULL, type, f;
9607 type = lang_hooks.types.make_type (RECORD_TYPE);
9608 name = DECL_NAME (TYPE_NAME (orig_type));
9609 name = build_decl (gimple_location (tcctx->ctx->stmt),
9610 TYPE_DECL, name, type);
9611 TYPE_NAME (type) = name;
9613 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9615 tree new_f = copy_node (f);
9616 DECL_CONTEXT (new_f) = type;
9617 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9618 TREE_CHAIN (new_f) = new_fields;
9619 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9620 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9621 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9622 &tcctx->cb, NULL);
9623 new_fields = new_f;
9624 tcctx->cb.decl_map->put (f, new_f);
9626 TYPE_FIELDS (type) = nreverse (new_fields);
9627 layout_type (type);
9628 return type;
9631 /* Create task copyfn. */
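/* Editor's sketch with hypothetical names, not original source: the
   generated copy function has roughly the shape

       void .omp_task_copyfn (struct record *dst, struct srecord *src)
       {
         dst->shared_p = src->shared_p;
         copy_ctor (&dst->fp_var, &src->fp_var);
       }

   copying shared-variable pointers verbatim and copy-constructing
   firstprivate variables from the sender record into the task's own
   record, with a final pass fixing up VLA firstprivates. */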
9633 static void
9634 create_task_copyfn (gimple_omp_task task_stmt, omp_context *ctx)
9636 struct function *child_cfun;
9637 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9638 tree record_type, srecord_type, bind, list;
9639 bool record_needs_remap = false, srecord_needs_remap = false;
9640 splay_tree_node n;
9641 struct omp_taskcopy_context tcctx;
9642 location_t loc = gimple_location (task_stmt);
9644 child_fn = gimple_omp_task_copy_fn (task_stmt);
9645 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9646 gcc_assert (child_cfun->cfg == NULL);
9647 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9649 /* Reset DECL_CONTEXT on function arguments. */
9650 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9651 DECL_CONTEXT (t) = child_fn;
9653 /* Populate the function. */
9654 push_gimplify_context ();
9655 push_cfun (child_cfun);
9657 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9658 TREE_SIDE_EFFECTS (bind) = 1;
9659 list = NULL;
9660 DECL_SAVED_TREE (child_fn) = bind;
9661 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9663 /* Remap src and dst argument types if needed. */
9664 record_type = ctx->record_type;
9665 srecord_type = ctx->srecord_type;
9666 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9667 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9669 record_needs_remap = true;
9670 break;
9672 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9673 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9675 srecord_needs_remap = true;
9676 break;
9679 if (record_needs_remap || srecord_needs_remap)
9681 memset (&tcctx, '\0', sizeof (tcctx));
9682 tcctx.cb.src_fn = ctx->cb.src_fn;
9683 tcctx.cb.dst_fn = child_fn;
9684 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
9685 gcc_checking_assert (tcctx.cb.src_node);
9686 tcctx.cb.dst_node = tcctx.cb.src_node;
9687 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9688 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9689 tcctx.cb.eh_lp_nr = 0;
9690 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9691 tcctx.cb.decl_map = new hash_map<tree, tree>;
9692 tcctx.ctx = ctx;
9694 if (record_needs_remap)
9695 record_type = task_copyfn_remap_type (&tcctx, record_type);
9696 if (srecord_needs_remap)
9697 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9699 else
9700 tcctx.cb.decl_map = NULL;
9702 arg = DECL_ARGUMENTS (child_fn);
9703 TREE_TYPE (arg) = build_pointer_type (record_type);
9704 sarg = DECL_CHAIN (arg);
9705 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9707 /* First pass: initialize temporaries used in record_type and srecord_type
9708 sizes and field offsets. */
9709 if (tcctx.cb.decl_map)
9710 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9711 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9713 tree *p;
9715 decl = OMP_CLAUSE_DECL (c);
9716 p = tcctx.cb.decl_map->get (decl);
9717 if (p == NULL)
9718 continue;
9719 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9720 sf = (tree) n->value;
9721 sf = *tcctx.cb.decl_map->get (sf);
9722 src = build_simple_mem_ref_loc (loc, sarg);
9723 src = omp_build_component_ref (src, sf);
9724 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9725 append_to_statement_list (t, &list);
9728 /* Second pass: copy shared var pointers and copy-construct non-VLA
9729 firstprivate vars. */
9730 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9731 switch (OMP_CLAUSE_CODE (c))
9733 case OMP_CLAUSE_SHARED:
9734 decl = OMP_CLAUSE_DECL (c);
9735 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9736 if (n == NULL)
9737 break;
9738 f = (tree) n->value;
9739 if (tcctx.cb.decl_map)
9740 f = *tcctx.cb.decl_map->get (f);
9741 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9742 sf = (tree) n->value;
9743 if (tcctx.cb.decl_map)
9744 sf = *tcctx.cb.decl_map->get (sf);
9745 src = build_simple_mem_ref_loc (loc, sarg);
9746 src = omp_build_component_ref (src, sf);
9747 dst = build_simple_mem_ref_loc (loc, arg);
9748 dst = omp_build_component_ref (dst, f);
9749 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9750 append_to_statement_list (t, &list);
9751 break;
9752 case OMP_CLAUSE_FIRSTPRIVATE:
9753 decl = OMP_CLAUSE_DECL (c);
9754 if (is_variable_sized (decl))
9755 break;
9756 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9757 if (n == NULL)
9758 break;
9759 f = (tree) n->value;
9760 if (tcctx.cb.decl_map)
9761 f = *tcctx.cb.decl_map->get (f);
9762 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9763 if (n != NULL)
9765 sf = (tree) n->value;
9766 if (tcctx.cb.decl_map)
9767 sf = *tcctx.cb.decl_map->get (sf);
9768 src = build_simple_mem_ref_loc (loc, sarg);
9769 src = omp_build_component_ref (src, sf);
9770 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9771 src = build_simple_mem_ref_loc (loc, src);
9773 else
9774 src = decl;
9775 dst = build_simple_mem_ref_loc (loc, arg);
9776 dst = omp_build_component_ref (dst, f);
9777 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9778 append_to_statement_list (t, &list);
9779 break;
9780 case OMP_CLAUSE_PRIVATE:
9781 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9782 break;
9783 decl = OMP_CLAUSE_DECL (c);
9784 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9785 f = (tree) n->value;
9786 if (tcctx.cb.decl_map)
9787 f = *tcctx.cb.decl_map->get (f);
9788 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9789 if (n != NULL)
9791 sf = (tree) n->value;
9792 if (tcctx.cb.decl_map)
9793 sf = *tcctx.cb.decl_map->get (sf);
9794 src = build_simple_mem_ref_loc (loc, sarg);
9795 src = omp_build_component_ref (src, sf);
9796 if (use_pointer_for_field (decl, NULL))
9797 src = build_simple_mem_ref_loc (loc, src);
9799 else
9800 src = decl;
9801 dst = build_simple_mem_ref_loc (loc, arg);
9802 dst = omp_build_component_ref (dst, f);
9803 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9804 append_to_statement_list (t, &list);
9805 break;
9806 default:
9807 break;
9810 /* Last pass: handle VLA firstprivates. */
9811 if (tcctx.cb.decl_map)
9812 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9813 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9815 tree ind, ptr, df;
9817 decl = OMP_CLAUSE_DECL (c);
9818 if (!is_variable_sized (decl))
9819 continue;
9820 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9821 if (n == NULL)
9822 continue;
9823 f = (tree) n->value;
9824 f = *tcctx.cb.decl_map->get (f);
9825 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9826 ind = DECL_VALUE_EXPR (decl);
9827 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9828 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9829 n = splay_tree_lookup (ctx->sfield_map,
9830 (splay_tree_key) TREE_OPERAND (ind, 0));
9831 sf = (tree) n->value;
9832 sf = *tcctx.cb.decl_map->get (sf);
9833 src = build_simple_mem_ref_loc (loc, sarg);
9834 src = omp_build_component_ref (src, sf);
9835 src = build_simple_mem_ref_loc (loc, src);
9836 dst = build_simple_mem_ref_loc (loc, arg);
9837 dst = omp_build_component_ref (dst, f);
9838 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9839 append_to_statement_list (t, &list);
9840 n = splay_tree_lookup (ctx->field_map,
9841 (splay_tree_key) TREE_OPERAND (ind, 0));
9842 df = (tree) n->value;
9843 df = *tcctx.cb.decl_map->get (df);
9844 ptr = build_simple_mem_ref_loc (loc, arg);
9845 ptr = omp_build_component_ref (ptr, df);
9846 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9847 build_fold_addr_expr_loc (loc, dst));
9848 append_to_statement_list (t, &list);
9851 t = build1 (RETURN_EXPR, void_type_node, NULL);
9852 append_to_statement_list (t, &list);
9854 if (tcctx.cb.decl_map)
9855 delete tcctx.cb.decl_map;
9856 pop_gimplify_context (NULL);
9857 BIND_EXPR_BODY (bind) = list;
9858 pop_cfun ();
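/* Editor's note, not original source: lower_depend_clauses below builds
   the pointer array that encodes the depend clauses.  Its layout is

       array[0]              = n_in + n_out
       array[1]              = n_out
       array[2 .. n_out+1]   = addresses of out/inout operands
       array[n_out+2 .. end] = addresses of in operands

   The array's address is then prepended to the task's clauses as an
   artificial OMP_CLAUSE_DEPEND, and the array is clobbered on exit. */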
9861 static void
9862 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9864 tree c, clauses;
9865 gimple g;
9866 size_t n_in = 0, n_out = 0, idx = 2, i;
9868 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9869 OMP_CLAUSE_DEPEND);
9870 gcc_assert (clauses);
9871 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9872 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9873 switch (OMP_CLAUSE_DEPEND_KIND (c))
9875 case OMP_CLAUSE_DEPEND_IN:
9876 n_in++;
9877 break;
9878 case OMP_CLAUSE_DEPEND_OUT:
9879 case OMP_CLAUSE_DEPEND_INOUT:
9880 n_out++;
9881 break;
9882 default:
9883 gcc_unreachable ();
9885 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9886 tree array = create_tmp_var (type, NULL);
9887 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9888 NULL_TREE);
9889 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9890 gimple_seq_add_stmt (iseq, g);
9891 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9892 NULL_TREE);
9893 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9894 gimple_seq_add_stmt (iseq, g);
9895 for (i = 0; i < 2; i++)
9897 if ((i ? n_in : n_out) == 0)
9898 continue;
9899 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9900 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9901 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9903 tree t = OMP_CLAUSE_DECL (c);
9904 t = fold_convert (ptr_type_node, t);
9905 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9906 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9907 NULL_TREE, NULL_TREE);
9908 g = gimple_build_assign (r, t);
9909 gimple_seq_add_stmt (iseq, g);
9912 tree *p = gimple_omp_task_clauses_ptr (stmt);
9913 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9914 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9915 OMP_CLAUSE_CHAIN (c) = *p;
9916 *p = c;
9917 tree clobber = build_constructor (type, NULL);
9918 TREE_THIS_VOLATILE (clobber) = 1;
9919 g = gimple_build_assign (array, clobber);
9920 gimple_seq_add_stmt (oseq, g);
9923 /* Lower the OpenMP parallel or task directive in the current statement
9924 in GSI_P. CTX holds context information for the directive. */
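/* Editor's sketch, not original source: for a parallel, the replacement
   bind has approximately the shape

       {
         <send clauses: .omp_data_o.x = x; ...>
         GIMPLE_OMP_PARALLEL <clauses, data_arg = .omp_data_o>
           { <receiver = &.omp_data_o;> <ilist> body <rlist> <olist> }
         <copy-back from .omp_data_o>
       }

   where .omp_data_o is the sender record created below; the receiver
   assignment is rewritten when the region becomes a child function. */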
9926 static void
9927 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9929 tree clauses;
9930 tree child_fn, t;
9931 gimple stmt = gsi_stmt (*gsi_p);
9932 gimple_bind par_bind, bind, dep_bind = NULL;
9933 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9934 location_t loc = gimple_location (stmt);
9936 clauses = gimple_omp_taskreg_clauses (stmt);
9937 par_bind =
9938 as_a <gimple_bind> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
9939 par_body = gimple_bind_body (par_bind);
9940 child_fn = ctx->cb.dst_fn;
9941 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9942 && !gimple_omp_parallel_combined_p (stmt))
9944 struct walk_stmt_info wi;
9945 int ws_num = 0;
9947 memset (&wi, 0, sizeof (wi));
9948 wi.info = &ws_num;
9949 wi.val_only = true;
9950 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9951 if (ws_num == 1)
9952 gimple_omp_parallel_set_combined_p (stmt, true);
9954 gimple_seq dep_ilist = NULL;
9955 gimple_seq dep_olist = NULL;
9956 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9957 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9959 push_gimplify_context ();
9960 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9961 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9964 if (ctx->srecord_type)
9965 create_task_copyfn (as_a <gimple_omp_task> (stmt), ctx);
9967 push_gimplify_context ();
9969 par_olist = NULL;
9970 par_ilist = NULL;
9971 par_rlist = NULL;
9972 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9973 lower_omp (&par_body, ctx);
9974 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9975 lower_reduction_clauses (clauses, &par_rlist, ctx);
9977 /* Declare all the variables created by mapping and the variables
9978 declared in the scope of the parallel body. */
9979 record_vars_into (ctx->block_vars, child_fn);
9980 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9982 if (ctx->record_type)
9984 ctx->sender_decl
9985 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9986 : ctx->record_type, ".omp_data_o");
9987 DECL_NAMELESS (ctx->sender_decl) = 1;
9988 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9989 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9992 olist = NULL;
9993 ilist = NULL;
9994 lower_send_clauses (clauses, &ilist, &olist, ctx);
9995 lower_send_shared_vars (&ilist, &olist, ctx);
9997 if (ctx->record_type)
9999 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
10000 TREE_THIS_VOLATILE (clobber) = 1;
10001 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
10002 clobber));
10005 /* Once all the expansions are done, sequence all the different
10006 fragments inside gimple_omp_body. */
10008 new_body = NULL;
10010 if (ctx->record_type)
10012 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10013 /* fixup_child_record_type might have changed receiver_decl's type. */
10014 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
10015 gimple_seq_add_stmt (&new_body,
10016 gimple_build_assign (ctx->receiver_decl, t));
10019 gimple_seq_add_seq (&new_body, par_ilist);
10020 gimple_seq_add_seq (&new_body, par_body);
10021 gimple_seq_add_seq (&new_body, par_rlist);
10022 if (ctx->cancellable)
10023 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
10024 gimple_seq_add_seq (&new_body, par_olist);
10025 new_body = maybe_catch_exception (new_body);
10026 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10027 gimple_omp_set_body (stmt, new_body);
10029 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
10030 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
10031 gimple_bind_add_seq (bind, ilist);
10032 gimple_bind_add_stmt (bind, stmt);
10033 gimple_bind_add_seq (bind, olist);
10035 pop_gimplify_context (NULL);
10037 if (dep_bind)
10039 gimple_bind_add_seq (dep_bind, dep_ilist);
10040 gimple_bind_add_stmt (dep_bind, bind);
10041 gimple_bind_add_seq (dep_bind, dep_olist);
10042 pop_gimplify_context (dep_bind);
10046 /* Lower the OpenMP target directive in the current statement
10047 in GSI_P. CTX holds context information for the directive. */
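/* Editor's note, not original source: for a target region with mapped
   variables the code below materializes three artifacts,

       .omp_data_arr    record holding addresses of the mapped objects
       .omp_data_sizes  array of map_cnt per-object byte sizes
       .omp_data_kinds  array of map_cnt bytes, map kind in the low 3
                        bits with ceil_log2 (alignment) shifted above

   and attaches them to the statement as a 3-element TREE_VEC. */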
10049 static void
10050 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10052 tree clauses;
10053 tree child_fn, t, c;
10054 gimple_omp_target stmt = as_a <gimple_omp_target> (gsi_stmt (*gsi_p));
10055 gimple_bind tgt_bind = NULL, bind;
10056 gimple_seq tgt_body = NULL, olist, ilist, new_body;
10057 location_t loc = gimple_location (stmt);
10058 int kind = gimple_omp_target_kind (stmt);
10059 unsigned int map_cnt = 0;
10061 clauses = gimple_omp_target_clauses (stmt);
10062 if (kind == GF_OMP_TARGET_KIND_REGION)
10064 tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
10065 tgt_body = gimple_bind_body (tgt_bind);
10067 else if (kind == GF_OMP_TARGET_KIND_DATA)
10068 tgt_body = gimple_omp_body (stmt);
10069 child_fn = ctx->cb.dst_fn;
10071 push_gimplify_context ();
10073 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10074 switch (OMP_CLAUSE_CODE (c))
10076 tree var, x;
10078 default:
10079 break;
10080 case OMP_CLAUSE_MAP:
10081 case OMP_CLAUSE_TO:
10082 case OMP_CLAUSE_FROM:
10083 var = OMP_CLAUSE_DECL (c);
10084 if (!DECL_P (var))
10086 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
10087 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10088 map_cnt++;
10089 continue;
10092 if (DECL_SIZE (var)
10093 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
10095 tree var2 = DECL_VALUE_EXPR (var);
10096 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
10097 var2 = TREE_OPERAND (var2, 0);
10098 gcc_assert (DECL_P (var2));
10099 var = var2;
10102 if (!maybe_lookup_field (var, ctx))
10103 continue;
10105 if (kind == GF_OMP_TARGET_KIND_REGION)
10107 x = build_receiver_ref (var, true, ctx);
10108 tree new_var = lookup_decl (var, ctx);
10109 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10110 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10111 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10112 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
10113 x = build_simple_mem_ref (x);
10114 SET_DECL_VALUE_EXPR (new_var, x);
10115 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
10117 map_cnt++;
10120 if (kind == GF_OMP_TARGET_KIND_REGION)
10122 target_nesting_level++;
10123 lower_omp (&tgt_body, ctx);
10124 target_nesting_level--;
10126 else if (kind == GF_OMP_TARGET_KIND_DATA)
10127 lower_omp (&tgt_body, ctx);
10129 if (kind == GF_OMP_TARGET_KIND_REGION)
10131 /* Declare all the variables created by mapping and the variables
10132 declared in the scope of the target body. */
10133 record_vars_into (ctx->block_vars, child_fn);
10134 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
10137 olist = NULL;
10138 ilist = NULL;
10139 if (ctx->record_type)
10141 ctx->sender_decl
10142 = create_tmp_var (ctx->record_type, ".omp_data_arr");
10143 DECL_NAMELESS (ctx->sender_decl) = 1;
10144 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
10145 t = make_tree_vec (3);
10146 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
10147 TREE_VEC_ELT (t, 1)
10148 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
10149 ".omp_data_sizes");
10150 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
10151 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
10152 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
10153 TREE_VEC_ELT (t, 2)
10154 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
10155 map_cnt),
10156 ".omp_data_kinds");
10157 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
10158 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
10159 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
10160 gimple_omp_target_set_data_arg (stmt, t);
10162 vec<constructor_elt, va_gc> *vsize;
10163 vec<constructor_elt, va_gc> *vkind;
10164 vec_alloc (vsize, map_cnt);
10165 vec_alloc (vkind, map_cnt);
10166 unsigned int map_idx = 0;
10168 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10169 switch (OMP_CLAUSE_CODE (c))
10171 tree ovar, nc;
10173 default:
10174 break;
10175 case OMP_CLAUSE_MAP:
10176 case OMP_CLAUSE_TO:
10177 case OMP_CLAUSE_FROM:
10178 nc = c;
10179 ovar = OMP_CLAUSE_DECL (c);
10180 if (!DECL_P (ovar))
10182 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10183 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10185 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
10186 == get_base_address (ovar));
10187 nc = OMP_CLAUSE_CHAIN (c);
10188 ovar = OMP_CLAUSE_DECL (nc);
10190 else
10192 tree x = build_sender_ref (ovar, ctx);
10193 tree v
10194 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
10195 gimplify_assign (x, v, &ilist);
10196 nc = NULL_TREE;
10199 else
10201 if (DECL_SIZE (ovar)
10202 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
10204 tree ovar2 = DECL_VALUE_EXPR (ovar);
10205 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
10206 ovar2 = TREE_OPERAND (ovar2, 0);
10207 gcc_assert (DECL_P (ovar2));
10208 ovar = ovar2;
10210 if (!maybe_lookup_field (ovar, ctx))
10211 continue;
10214 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
10215 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
10216 talign = DECL_ALIGN_UNIT (ovar);
10217 if (nc)
10219 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
10220 tree x = build_sender_ref (ovar, ctx);
10221 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10222 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10223 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10224 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
10226 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10227 tree avar
10228 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
10229 mark_addressable (avar);
10230 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
10231 talign = DECL_ALIGN_UNIT (avar);
10232 avar = build_fold_addr_expr (avar);
10233 gimplify_assign (x, avar, &ilist);
10235 else if (is_gimple_reg (var))
10237 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10238 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
10239 mark_addressable (avar);
10240 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
10241 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
10242 gimplify_assign (avar, var, &ilist);
10243 avar = build_fold_addr_expr (avar);
10244 gimplify_assign (x, avar, &ilist);
10245 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
10246 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
10247 && !TYPE_READONLY (TREE_TYPE (var)))
10249 x = build_sender_ref (ovar, ctx);
10250 x = build_simple_mem_ref (x);
10251 gimplify_assign (var, x, &olist);
10254 else
10256 var = build_fold_addr_expr (var);
10257 gimplify_assign (x, var, &ilist);
10260 tree s = OMP_CLAUSE_SIZE (c);
10261 if (s == NULL_TREE)
10262 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
10263 s = fold_convert (size_type_node, s);
10264 tree purpose = size_int (map_idx++);
10265 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
10266 if (TREE_CODE (s) != INTEGER_CST)
10267 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
10269 unsigned char tkind = 0;
10270 switch (OMP_CLAUSE_CODE (c))
10272 case OMP_CLAUSE_MAP:
10273 tkind = OMP_CLAUSE_MAP_KIND (c);
10274 break;
10275 case OMP_CLAUSE_TO:
10276 tkind = OMP_CLAUSE_MAP_TO;
10277 break;
10278 case OMP_CLAUSE_FROM:
10279 tkind = OMP_CLAUSE_MAP_FROM;
10280 break;
10281 default:
10282 gcc_unreachable ();
10284 talign = ceil_log2 (talign);
10285 tkind |= talign << 3;
10286 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
10287 build_int_cst (unsigned_char_type_node,
10288 tkind));
10289 if (nc && nc != c)
10290 c = nc;
10293 gcc_assert (map_idx == map_cnt);
10295 DECL_INITIAL (TREE_VEC_ELT (t, 1))
10296 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
10297 DECL_INITIAL (TREE_VEC_ELT (t, 2))
10298 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
10299 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
10301 gimple_seq initlist = NULL;
10302 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
10303 TREE_VEC_ELT (t, 1)),
10304 &initlist, true, NULL_TREE);
10305 gimple_seq_add_seq (&ilist, initlist);
10307 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
10308 NULL);
10309 TREE_THIS_VOLATILE (clobber) = 1;
10310 gimple_seq_add_stmt (&olist,
10311 gimple_build_assign (TREE_VEC_ELT (t, 1),
10312 clobber));
10315 tree clobber = build_constructor (ctx->record_type, NULL);
10316 TREE_THIS_VOLATILE (clobber) = 1;
10317 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
10318 clobber));
10321 /* Once all the expansions are done, sequence all the different
10322 fragments inside gimple_omp_body. */
10324 new_body = NULL;
10326 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
10328 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10329 /* fixup_child_record_type might have changed receiver_decl's type. */
10330 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
10331 gimple_seq_add_stmt (&new_body,
10332 gimple_build_assign (ctx->receiver_decl, t));
10335 if (kind == GF_OMP_TARGET_KIND_REGION)
10337 gimple_seq_add_seq (&new_body, tgt_body);
10338 new_body = maybe_catch_exception (new_body);
10340 else if (kind == GF_OMP_TARGET_KIND_DATA)
10341 new_body = tgt_body;
10342 if (kind != GF_OMP_TARGET_KIND_UPDATE)
10344 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10345 gimple_omp_set_body (stmt, new_body);
10348 bind = gimple_build_bind (NULL, NULL,
10349 tgt_bind ? gimple_bind_block (tgt_bind)
10350 : NULL_TREE);
10351 gsi_replace (gsi_p, bind, true);
10352 gimple_bind_add_seq (bind, ilist);
10353 gimple_bind_add_stmt (bind, stmt);
10354 gimple_bind_add_seq (bind, olist);
10356 pop_gimplify_context (NULL);
10359 /* Expand code for an OpenMP teams directive. */
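/* Editor's sketch, not original source:

       #pragma omp teams num_teams (n) thread_limit (t)
         body;

   lowers approximately to

       GIMPLE_OMP_TEAMS <clauses>;
       GOMP_teams (n, t);
       body;
       OMP_RETURN;

   with 0 passed for either value when the clause is absent. */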
10361 static void
10362 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10364 gimple_omp_teams teams_stmt = as_a <gimple_omp_teams> (gsi_stmt (*gsi_p));
10365 push_gimplify_context ();
10367 tree block = make_node (BLOCK);
10368 gimple_bind bind = gimple_build_bind (NULL, NULL, block);
10369 gsi_replace (gsi_p, bind, true);
10370 gimple_seq bind_body = NULL;
10371 gimple_seq dlist = NULL;
10372 gimple_seq olist = NULL;
10374 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10375 OMP_CLAUSE_NUM_TEAMS);
10376 if (num_teams == NULL_TREE)
10377 num_teams = build_int_cst (unsigned_type_node, 0);
10378 else
10380 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
10381 num_teams = fold_convert (unsigned_type_node, num_teams);
10382 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
10384 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10385 OMP_CLAUSE_THREAD_LIMIT);
10386 if (thread_limit == NULL_TREE)
10387 thread_limit = build_int_cst (unsigned_type_node, 0);
10388 else
10390 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
10391 thread_limit = fold_convert (unsigned_type_node, thread_limit);
10392 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
10393 fb_rvalue);
10396 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
10397 &bind_body, &dlist, ctx, NULL);
10398 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
10399 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
10400 gimple_seq_add_stmt (&bind_body, teams_stmt);
10402 location_t loc = gimple_location (teams_stmt);
10403 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
10404 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
10405 gimple_set_location (call, loc);
10406 gimple_seq_add_stmt (&bind_body, call);
10408 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
10409 gimple_omp_set_body (teams_stmt, NULL);
10410 gimple_seq_add_seq (&bind_body, olist);
10411 gimple_seq_add_seq (&bind_body, dlist);
10412 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
10413 gimple_bind_set_body (bind, bind_body);
10415 pop_gimplify_context (bind);
10417 gimple_bind_append_vars (bind, ctx->block_vars);
10418 BLOCK_VARS (block) = ctx->block_vars;
10419 if (BLOCK_VARS (block))
10420 TREE_USED (block) = 1;
10424 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
10425 regimplified. If DATA is non-NULL, lower_omp_1 is being invoked
10426 outside of an OpenMP context, but with task_shared_vars set. */
10428 static tree
10429 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
10430 void *data)
10432 tree t = *tp;
10434 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
10435 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
10436 return t;
10438 if (task_shared_vars
10439 && DECL_P (t)
10440 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
10441 return t;
10443 /* If a global variable has been privatized, TREE_CONSTANT on
10444 ADDR_EXPR might be wrong. */
10445 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
10446 recompute_tree_invariant_for_addr_expr (t);
10448 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
10449 return NULL_TREE;
10452 static void
10453 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10455 gimple stmt = gsi_stmt (*gsi_p);
10456 struct walk_stmt_info wi;
10458 if (gimple_has_location (stmt))
10459 input_location = gimple_location (stmt);
10461 if (task_shared_vars)
10462 memset (&wi, '\0', sizeof (wi));
10464 /* If we have issued syntax errors, avoid doing any heavy lifting.
10465 Just replace the OpenMP directives with a NOP to avoid
10466 confusing RTL expansion. */
10467 if (seen_error () && is_gimple_omp (stmt))
10469 gsi_replace (gsi_p, gimple_build_nop (), true);
10470 return;
10473 switch (gimple_code (stmt))
10475 case GIMPLE_COND:
10476 if ((ctx || task_shared_vars)
10477 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
10478 ctx ? NULL : &wi, NULL)
10479 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
10480 ctx ? NULL : &wi, NULL)))
10481 gimple_regimplify_operands (stmt, gsi_p);
10482 break;
10483 case GIMPLE_CATCH:
10484 lower_omp (gimple_catch_handler_ptr (as_a <gimple_catch> (stmt)), ctx);
10485 break;
10486 case GIMPLE_EH_FILTER:
10487 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
10488 break;
10489 case GIMPLE_TRY:
10490 lower_omp (gimple_try_eval_ptr (stmt), ctx);
10491 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
10492 break;
10493 case GIMPLE_TRANSACTION:
10494 lower_omp (gimple_transaction_body_ptr (
10495 as_a <gimple_transaction> (stmt)),
10496 ctx);
10497 break;
10498 case GIMPLE_BIND:
10499 lower_omp (gimple_bind_body_ptr (as_a <gimple_bind> (stmt)), ctx);
10500 break;
10501 case GIMPLE_OMP_PARALLEL:
10502 case GIMPLE_OMP_TASK:
10503 ctx = maybe_lookup_ctx (stmt);
10504 gcc_assert (ctx);
10505 if (ctx->cancellable)
10506 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10507 lower_omp_taskreg (gsi_p, ctx);
10508 break;
10509 case GIMPLE_OMP_FOR:
10510 ctx = maybe_lookup_ctx (stmt);
10511 gcc_assert (ctx);
10512 if (ctx->cancellable)
10513 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10514 lower_omp_for (gsi_p, ctx);
10515 break;
10516 case GIMPLE_OMP_SECTIONS:
10517 ctx = maybe_lookup_ctx (stmt);
10518 gcc_assert (ctx);
10519 if (ctx->cancellable)
10520 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10521 lower_omp_sections (gsi_p, ctx);
10522 break;
10523 case GIMPLE_OMP_SINGLE:
10524 ctx = maybe_lookup_ctx (stmt);
10525 gcc_assert (ctx);
10526 lower_omp_single (gsi_p, ctx);
10527 break;
10528 case GIMPLE_OMP_MASTER:
10529 ctx = maybe_lookup_ctx (stmt);
10530 gcc_assert (ctx);
10531 lower_omp_master (gsi_p, ctx);
10532 break;
10533 case GIMPLE_OMP_TASKGROUP:
10534 ctx = maybe_lookup_ctx (stmt);
10535 gcc_assert (ctx);
10536 lower_omp_taskgroup (gsi_p, ctx);
10537 break;
10538 case GIMPLE_OMP_ORDERED:
10539 ctx = maybe_lookup_ctx (stmt);
10540 gcc_assert (ctx);
10541 lower_omp_ordered (gsi_p, ctx);
10542 break;
10543 case GIMPLE_OMP_CRITICAL:
10544 ctx = maybe_lookup_ctx (stmt);
10545 gcc_assert (ctx);
10546 lower_omp_critical (gsi_p, ctx);
10547 break;
10548 case GIMPLE_OMP_ATOMIC_LOAD:
10549 if ((ctx || task_shared_vars)
10550 && walk_tree (gimple_omp_atomic_load_rhs_ptr (
10551 as_a <gimple_omp_atomic_load> (stmt)),
10552 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
10553 gimple_regimplify_operands (stmt, gsi_p);
10554 break;
10555 case GIMPLE_OMP_TARGET:
10556 ctx = maybe_lookup_ctx (stmt);
10557 gcc_assert (ctx);
10558 lower_omp_target (gsi_p, ctx);
10559 break;
10560 case GIMPLE_OMP_TEAMS:
10561 ctx = maybe_lookup_ctx (stmt);
10562 gcc_assert (ctx);
10563 lower_omp_teams (gsi_p, ctx);
10564 break;
10565 case GIMPLE_CALL:
10566 tree fndecl;
10567 fndecl = gimple_call_fndecl (stmt);
10568 if (fndecl
10569 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10570 switch (DECL_FUNCTION_CODE (fndecl))
10572 case BUILT_IN_GOMP_BARRIER:
10573 if (ctx == NULL)
10574 break;
10575 /* FALLTHRU */
10576 case BUILT_IN_GOMP_CANCEL:
10577 case BUILT_IN_GOMP_CANCELLATION_POINT:
10578 omp_context *cctx;
10579 cctx = ctx;
10580 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10581 cctx = cctx->outer;
10582 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
10583 if (!cctx->cancellable)
10585 if (DECL_FUNCTION_CODE (fndecl)
10586 == BUILT_IN_GOMP_CANCELLATION_POINT)
10588 stmt = gimple_build_nop ();
10589 gsi_replace (gsi_p, stmt, false);
10591 break;
10593 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10595 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10596 gimple_call_set_fndecl (stmt, fndecl);
10597 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10599 tree lhs;
10600 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)), NULL);
10601 gimple_call_set_lhs (stmt, lhs);
10602 tree fallthru_label;
10603 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10604 gimple g;
10605 g = gimple_build_label (fallthru_label);
10606 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10607 g = gimple_build_cond (NE_EXPR, lhs,
10608 fold_convert (TREE_TYPE (lhs),
10609 boolean_false_node),
10610 cctx->cancel_label, fallthru_label);
10611 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10612 break;
10613 default:
10614 break;
10616 /* FALLTHRU */
10617 default:
10618 if ((ctx || task_shared_vars)
10619 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10620 ctx ? NULL : &wi))
10622 /* Just remove clobbers; this should happen only if we have
10623 "privatized" local addressable variables in SIMD regions.
10624 The clobber isn't needed in that case, and gimplifying the address
10625 of the ARRAY_REF into a pointer and creating a MEM_REF based
10626 clobber would create worse code than we get with the clobber
10627 dropped. */
10628 if (gimple_clobber_p (stmt))
10630 gsi_replace (gsi_p, gimple_build_nop (), true);
10631 break;
10633 gimple_regimplify_operands (stmt, gsi_p);
10635 break;
10639 static void
10640 lower_omp (gimple_seq *body, omp_context *ctx)
10642 location_t saved_location = input_location;
10643 gimple_stmt_iterator gsi;
10644 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10645 lower_omp_1 (&gsi, ctx);
10646 /* During gimplification, we have not always invoked fold_stmt
10647 (gimplify.c:maybe_fold_stmt); call it now. */
10648 if (target_nesting_level)
10649 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10650 fold_stmt (&gsi);
10651 input_location = saved_location;
10654 /* Main entry point. */
10656 static unsigned int
10657 execute_lower_omp (void)
10659 gimple_seq body;
10660 int i;
10661 omp_context *ctx;
10663 /* This pass always runs, to provide PROP_gimple_lomp.
10664 But there is nothing to do unless -fopenmp is given. */
10665 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10666 return 0;
10668 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10669 delete_omp_context);
10671 body = gimple_body (current_function_decl);
10672 scan_omp (&body, NULL);
10673 gcc_assert (taskreg_nesting_level == 0);
10674 FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
10675 finish_taskreg_scan (ctx);
10676 taskreg_contexts.release ();
10678 if (all_contexts->root)
10680 if (task_shared_vars)
10681 push_gimplify_context ();
10682 lower_omp (&body, NULL);
10683 if (task_shared_vars)
10684 pop_gimplify_context (NULL);
10687 if (all_contexts)
10689 splay_tree_delete (all_contexts);
10690 all_contexts = NULL;
10692 BITMAP_FREE (task_shared_vars);
10693 return 0;
10696 namespace {
10698 const pass_data pass_data_lower_omp =
10700 GIMPLE_PASS, /* type */
10701 "omplower", /* name */
10702 OPTGROUP_NONE, /* optinfo_flags */
10703 TV_NONE, /* tv_id */
10704 PROP_gimple_any, /* properties_required */
10705 PROP_gimple_lomp, /* properties_provided */
10706 0, /* properties_destroyed */
10707 0, /* todo_flags_start */
10708 0, /* todo_flags_finish */
10711 class pass_lower_omp : public gimple_opt_pass
10713 public:
10714 pass_lower_omp (gcc::context *ctxt)
10715 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10718 /* opt_pass methods: */
10719 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10721 }; // class pass_lower_omp
10723 } // anon namespace
10725 gimple_opt_pass *
10726 make_pass_lower_omp (gcc::context *ctxt)
10728 return new pass_lower_omp (ctxt);
10731 /* The following is a utility to diagnose OpenMP structured block violations.
10732 It is not part of the "omplower" pass, as that's invoked too late. It
10733 should be invoked by the respective front ends after gimplification. */
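/* Editor's example of a violation this utility reports:

       #pragma omp parallel
       {
         if (cond)
           goto out;
       }
     out:;

   which is diagnosed below as "invalid branch to/from an OpenMP
   structured block". */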
10735 static splay_tree all_labels;
10737 /* Check for mismatched contexts and generate an error if needed. Return
10738 true if an error is detected. */
10740 static bool
10741 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10742 gimple branch_ctx, gimple label_ctx)
10744 if (label_ctx == branch_ctx)
10745 return false;
10749 /* Previously we kept track of the label's entire context in diagnose_sb_[12]
10750 so we could traverse it and issue a correct "exit" or "enter" error
10751 message upon a structured block violation.
10753 We built the context by building a list with tree_cons'ing, but there is
10754 no easy counterpart in gimple tuples. It seems like far too much work
10755 for issuing exit/enter error messages. If someone really misses the
10756 distinct error message... patches welcome. */
10759 #if 0
10760 /* Try to avoid confusing the user by producing an error message
10761 with correct "exit" or "enter" verbiage. We prefer "exit"
10762 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10763 if (branch_ctx == NULL)
10764 exit_p = false;
10765 else
10767 while (label_ctx)
10769 if (TREE_VALUE (label_ctx) == branch_ctx)
10771 exit_p = false;
10772 break;
10774 label_ctx = TREE_CHAIN (label_ctx);
10778 if (exit_p)
10779 error ("invalid exit from OpenMP structured block");
10780 else
10781 error ("invalid entry to OpenMP structured block");
10782 #endif
10784 bool cilkplus_block = false;
10785 if (flag_cilkplus)
10787 if ((branch_ctx
10788 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10789 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10790 || (label_ctx
10791 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10792 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10793 cilkplus_block = true;
10796 /* If it's obvious we have an invalid entry, be specific about the error. */
10797 if (branch_ctx == NULL)
10799 if (cilkplus_block)
10800 error ("invalid entry to Cilk Plus structured block");
10801 else
10802 error ("invalid entry to OpenMP structured block");
10804 else
10806 /* Otherwise, be vague and lazy, but efficient. */
10807 if (cilkplus_block)
10808 error ("invalid branch to/from a Cilk Plus structured block");
10809 else
10810 error ("invalid branch to/from an OpenMP structured block");
10813 gsi_replace (gsi_p, gimple_build_nop (), false);
10814 return true;
10817 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10818 where each label is found. */
10820 static tree
10821 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10822 struct walk_stmt_info *wi)
10824 gimple context = (gimple) wi->info;
10825 gimple inner_context;
10826 gimple stmt = gsi_stmt (*gsi_p);
10828 *handled_ops_p = true;
10830 switch (gimple_code (stmt))
10832 WALK_SUBSTMTS;
10834 case GIMPLE_OMP_PARALLEL:
10835 case GIMPLE_OMP_TASK:
10836 case GIMPLE_OMP_SECTIONS:
10837 case GIMPLE_OMP_SINGLE:
10838 case GIMPLE_OMP_SECTION:
10839 case GIMPLE_OMP_MASTER:
10840 case GIMPLE_OMP_ORDERED:
10841 case GIMPLE_OMP_CRITICAL:
10842 case GIMPLE_OMP_TARGET:
10843 case GIMPLE_OMP_TEAMS:
10844 case GIMPLE_OMP_TASKGROUP:
10845 /* The minimal context here is just the current OMP construct. */
10846 inner_context = stmt;
10847 wi->info = inner_context;
10848 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10849 wi->info = context;
10850 break;
10852 case GIMPLE_OMP_FOR:
10853 inner_context = stmt;
10854 wi->info = inner_context;
10855 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10856 walk them. */
10857 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10858 diagnose_sb_1, NULL, wi);
10859 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10860 wi->info = context;
10861 break;
10863 case GIMPLE_LABEL:
10864 splay_tree_insert (all_labels,
10865 (splay_tree_key) gimple_label_label (
10866 as_a <gimple_label> (stmt)),
10867 (splay_tree_value) context);
10868 break;
10870 default:
10871 break;
10874 return NULL_TREE;
10877 /* Pass 2: Check each branch and see if its context differs from that of
10878 the destination label's context. */
10880 static tree
10881 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10882 struct walk_stmt_info *wi)
10884 gimple context = (gimple) wi->info;
10885 splay_tree_node n;
10886 gimple stmt = gsi_stmt (*gsi_p);
10888 *handled_ops_p = true;
10890 switch (gimple_code (stmt))
10892 WALK_SUBSTMTS;
10894 case GIMPLE_OMP_PARALLEL:
10895 case GIMPLE_OMP_TASK:
10896 case GIMPLE_OMP_SECTIONS:
10897 case GIMPLE_OMP_SINGLE:
10898 case GIMPLE_OMP_SECTION:
10899 case GIMPLE_OMP_MASTER:
10900 case GIMPLE_OMP_ORDERED:
10901 case GIMPLE_OMP_CRITICAL:
10902 case GIMPLE_OMP_TARGET:
10903 case GIMPLE_OMP_TEAMS:
10904 case GIMPLE_OMP_TASKGROUP:
10905 wi->info = stmt;
10906 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10907 wi->info = context;
10908 break;
10910 case GIMPLE_OMP_FOR:
10911 wi->info = stmt;
10912 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10913 walk them. */
10914 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10915 diagnose_sb_2, NULL, wi);
10916 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10917 wi->info = context;
10918 break;
10920 case GIMPLE_COND:
10922 gimple_cond cond_stmt = as_a <gimple_cond> (stmt);
10923 tree lab = gimple_cond_true_label (cond_stmt);
10924 if (lab)
10926 n = splay_tree_lookup (all_labels,
10927 (splay_tree_key) lab);
10928 diagnose_sb_0 (gsi_p, context,
10929 n ? (gimple) n->value : NULL);
10931 lab = gimple_cond_false_label (cond_stmt);
10932 if (lab)
10934 n = splay_tree_lookup (all_labels,
10935 (splay_tree_key) lab);
10936 diagnose_sb_0 (gsi_p, context,
10937 n ? (gimple) n->value : NULL);
10940 break;
10942 case GIMPLE_GOTO:
10944 tree lab = gimple_goto_dest (stmt);
10945 if (TREE_CODE (lab) != LABEL_DECL)
10946 break;
10948 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10949 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10951 break;
10953 case GIMPLE_SWITCH:
10955 gimple_switch switch_stmt = as_a <gimple_switch> (stmt);
10956 unsigned int i;
10957 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
10959 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
10960 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10961 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10962 break;
10965 break;
10967 case GIMPLE_RETURN:
10968 diagnose_sb_0 (gsi_p, context, NULL);
10969 break;
10971 default:
10972 break;
10975 return NULL_TREE;
10978 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10979 codes. */
10980 bool
10981 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
10982 int *region_idx)
10984 gimple last = last_stmt (bb);
10985 enum gimple_code code = gimple_code (last);
10986 struct omp_region *cur_region = *region;
10987 bool fallthru = false;
10989 switch (code)
10991 case GIMPLE_OMP_PARALLEL:
10992 case GIMPLE_OMP_TASK:
10993 case GIMPLE_OMP_FOR:
10994 case GIMPLE_OMP_SINGLE:
10995 case GIMPLE_OMP_TEAMS:
10996 case GIMPLE_OMP_MASTER:
10997 case GIMPLE_OMP_TASKGROUP:
10998 case GIMPLE_OMP_ORDERED:
10999 case GIMPLE_OMP_CRITICAL:
11000 case GIMPLE_OMP_SECTION:
11001 cur_region = new_omp_region (bb, code, cur_region);
11002 fallthru = true;
11003 break;
11005 case GIMPLE_OMP_TARGET:
11006 cur_region = new_omp_region (bb, code, cur_region);
11007 fallthru = true;
11008 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
11009 cur_region = cur_region->outer;
11010 break;
11012 case GIMPLE_OMP_SECTIONS:
11013 cur_region = new_omp_region (bb, code, cur_region);
11014 fallthru = true;
11015 break;
11017 case GIMPLE_OMP_SECTIONS_SWITCH:
11018 fallthru = false;
11019 break;
11021 case GIMPLE_OMP_ATOMIC_LOAD:
11022 case GIMPLE_OMP_ATOMIC_STORE:
11023 fallthru = true;
11024 break;
11026 case GIMPLE_OMP_RETURN:
11027 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
11028 somewhere other than the next block. This will be
11029 created later. */
11030 cur_region->exit = bb;
11031 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
11032 cur_region = cur_region->outer;
11033 break;
11035 case GIMPLE_OMP_CONTINUE:
11036 cur_region->cont = bb;
11037 switch (cur_region->type)
11039 case GIMPLE_OMP_FOR:
11040 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
11041 succ edges as abnormal to prevent splitting
11042 them. */
11043 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
11044 /* Make the loopback edge. */
11045 make_edge (bb, single_succ (cur_region->entry),
11046 EDGE_ABNORMAL);
11048 /* Create an edge from GIMPLE_OMP_FOR to exit, which
11049 corresponds to the case that the body of the loop
11050 is not executed at all. */
11051 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
11052 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
11053 fallthru = false;
11054 break;
11056 case GIMPLE_OMP_SECTIONS:
11057 /* Wire up the edges into and out of the nested sections. */
11059 basic_block switch_bb = single_succ (cur_region->entry);
11061 struct omp_region *i;
11062 for (i = cur_region->inner; i ; i = i->next)
11064 gcc_assert (i->type == GIMPLE_OMP_SECTION);
11065 make_edge (switch_bb, i->entry, 0);
11066 make_edge (i->exit, bb, EDGE_FALLTHRU);
11069 /* Make the loopback edge to the block with
11070 GIMPLE_OMP_SECTIONS_SWITCH. */
11071 make_edge (bb, switch_bb, 0);
11073 /* Make the edge from the switch to exit. */
11074 make_edge (switch_bb, bb->next_bb, 0);
11075 fallthru = false;
11077 break;
11079 default:
11080 gcc_unreachable ();
11082 break;
11084 default:
11085 gcc_unreachable ();
11088 if (*region != cur_region)
11090 *region = cur_region;
11091 if (cur_region)
11092 *region_idx = cur_region->entry->index;
11093 else
11094 *region_idx = 0;
11097 return fallthru;
11100 static unsigned int
11101 diagnose_omp_structured_block_errors (void)
11103 struct walk_stmt_info wi;
11104 gimple_seq body = gimple_body (current_function_decl);
11106 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
11108 memset (&wi, 0, sizeof (wi));
11109 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
11111 memset (&wi, 0, sizeof (wi));
11112 wi.want_locations = true;
11113 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
11115 gimple_set_body (current_function_decl, body);
11117 splay_tree_delete (all_labels);
11118 all_labels = NULL;
11120 return 0;
11123 namespace {
11125 const pass_data pass_data_diagnose_omp_blocks =
11127 GIMPLE_PASS, /* type */
11128 "*diagnose_omp_blocks", /* name */
11129 OPTGROUP_NONE, /* optinfo_flags */
11130 TV_NONE, /* tv_id */
11131 PROP_gimple_any, /* properties_required */
11132 0, /* properties_provided */
11133 0, /* properties_destroyed */
11134 0, /* todo_flags_start */
11135 0, /* todo_flags_finish */
11138 class pass_diagnose_omp_blocks : public gimple_opt_pass
11140 public:
11141 pass_diagnose_omp_blocks (gcc::context *ctxt)
11142 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
11145 /* opt_pass methods: */
11146 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
11147 virtual unsigned int execute (function *)
11149 return diagnose_omp_structured_block_errors ();
11152 }; // class pass_diagnose_omp_blocks
11154 } // anon namespace
11156 gimple_opt_pass *
11157 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
11159 return new pass_diagnose_omp_blocks (ctxt);
11162 /* SIMD clone supporting code. */
11164 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
11165 of arguments to reserve space for. */
11167 static struct cgraph_simd_clone *
11168 simd_clone_struct_alloc (int nargs)
11170 struct cgraph_simd_clone *clone_info;
11171 size_t len = (sizeof (struct cgraph_simd_clone)
11172 + nargs * sizeof (struct cgraph_simd_clone_arg));
11173 clone_info = (struct cgraph_simd_clone *)
11174 ggc_internal_cleared_alloc (len);
11175 return clone_info;
11178 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
11180 static inline void
11181 simd_clone_struct_copy (struct cgraph_simd_clone *to,
11182 struct cgraph_simd_clone *from)
11184 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
11185 + ((from->nargs - from->inbranch)
11186 * sizeof (struct cgraph_simd_clone_arg))));
11189 /* Return a vector of the parameter types of function FNDECL.
11190 This uses TYPE_ARG_TYPES if available, otherwise falls back to
11191 the types of the DECL_ARGUMENTS chain. */
11193 vec<tree>
11194 simd_clone_vector_of_formal_parm_types (tree fndecl)
11196 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
11197 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
11198 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
11199 unsigned int i;
11200 tree arg;
11201 FOR_EACH_VEC_ELT (args, i, arg)
11202 args[i] = TREE_TYPE (args[i]);
11203 return args;
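/* For instance, given a prototype "int foo (int, float);" the vector
   returned above is {int, float, void}, the trailing void coming from
   the TYPE_ARG_TYPES terminator; callers such as
   simd_clone_clauses_extract strip it by checking args.last ().  */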
11206 /* Given a simd function in NODE, extract the simd specific
11207 information from the OMP clauses passed in CLAUSES, and return
11208 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
11209 is set to TRUE if the `inbranch' or `notinbranch' clause is specified,
11210 otherwise set to FALSE. */
11212 static struct cgraph_simd_clone *
11213 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
11214 bool *inbranch_specified)
11216 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
11217 tree t;
11218 int n;
11219 *inbranch_specified = false;
11221 n = args.length ();
11222 if (n > 0 && args.last () == void_type_node)
11223 n--;
11225 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
11226 be cloned carry a distinctive artificial "cilk simd function"
11227 attribute in addition to "omp declare simd". */
11228 bool cilk_clone
11229 = (flag_cilkplus
11230 && lookup_attribute ("cilk simd function",
11231 DECL_ATTRIBUTES (node->decl)));
11233 /* Allocate one more than needed just in case this is an in-branch
11234 clone which will require a mask argument. */
11235 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
11236 clone_info->nargs = n;
11237 clone_info->cilk_elemental = cilk_clone;
11239 if (!clauses)
11241 args.release ();
11242 return clone_info;
11244 clauses = TREE_VALUE (clauses);
11245 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
11246 return clone_info;
11248 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
11250 switch (OMP_CLAUSE_CODE (t))
11252 case OMP_CLAUSE_INBRANCH:
11253 clone_info->inbranch = 1;
11254 *inbranch_specified = true;
11255 break;
11256 case OMP_CLAUSE_NOTINBRANCH:
11257 clone_info->inbranch = 0;
11258 *inbranch_specified = true;
11259 break;
11260 case OMP_CLAUSE_SIMDLEN:
11261 clone_info->simdlen
11262 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
11263 break;
11264 case OMP_CLAUSE_LINEAR:
11266 tree decl = OMP_CLAUSE_DECL (t);
11267 tree step = OMP_CLAUSE_LINEAR_STEP (t);
11268 int argno = TREE_INT_CST_LOW (decl);
11269 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
11271 clone_info->args[argno].arg_type
11272 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
11273 clone_info->args[argno].linear_step = tree_to_shwi (step);
11274 gcc_assert (clone_info->args[argno].linear_step >= 0
11275 && clone_info->args[argno].linear_step < n);
11277 else
11279 if (POINTER_TYPE_P (args[argno]))
11280 step = fold_convert (ssizetype, step);
11281 if (!tree_fits_shwi_p (step))
11283 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11284 "ignoring large linear step");
11285 args.release ();
11286 return NULL;
11288 else if (integer_zerop (step))
11290 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11291 "ignoring zero linear step");
11292 args.release ();
11293 return NULL;
11295 else
11297 clone_info->args[argno].arg_type
11298 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
11299 clone_info->args[argno].linear_step = tree_to_shwi (step);
11302 break;
11304 case OMP_CLAUSE_UNIFORM:
11306 tree decl = OMP_CLAUSE_DECL (t);
11307 int argno = tree_to_uhwi (decl);
11308 clone_info->args[argno].arg_type
11309 = SIMD_CLONE_ARG_TYPE_UNIFORM;
11310 break;
11312 case OMP_CLAUSE_ALIGNED:
11314 tree decl = OMP_CLAUSE_DECL (t);
11315 int argno = tree_to_uhwi (decl);
11316 clone_info->args[argno].alignment
11317 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
11318 break;
11320 default:
11321 break;
11324 args.release ();
11325 return clone_info;
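/* An illustrative mapping of clauses to the fields filled in above
   (argument numbers are zero-based; a sketch, not a testsuite case):

	#pragma omp declare simd simdlen(8) notinbranch \
		uniform(a) linear(b:4) aligned(c:16)
	int foo (int a, int b, int *c);

   yields simdlen == 8, inbranch == 0 (*INBRANCH_SPECIFIED == true),
   args[0].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM,
   args[1].arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with
   linear_step == 4, and args[2].alignment == 16.  */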
11328 /* Given a SIMD clone in NODE, calculate the characteristic data
11329 type and return the corresponding type. The characteristic data
11330 type is computed as described in the Intel Vector ABI. */
11332 static tree
11333 simd_clone_compute_base_data_type (struct cgraph_node *node,
11334 struct cgraph_simd_clone *clone_info)
11336 tree type = integer_type_node;
11337 tree fndecl = node->decl;
11339 /* a) For a non-void function, the characteristic data type is the
11340 return type. */
11341 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
11342 type = TREE_TYPE (TREE_TYPE (fndecl));
11344 /* b) If the function has any non-uniform, non-linear parameters,
11345 then the characteristic data type is the type of the first
11346 such parameter. */
11347 else
11349 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
11350 for (unsigned int i = 0; i < clone_info->nargs; ++i)
11351 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
11353 type = map[i];
11354 break;
11356 map.release ();
11359 /* c) If the characteristic data type determined by a) or b) above
11360 is a struct, union, or class type which is passed by value (except
11361 for the type that maps to the built-in complex data type), the
11362 characteristic data type is int. */
11363 if (RECORD_OR_UNION_TYPE_P (type)
11364 && !aggregate_value_p (type, NULL)
11365 && TREE_CODE (type) != COMPLEX_TYPE)
11366 return integer_type_node;
11368 /* d) If none of the above three classes is applicable, the
11369 characteristic data type is int. */
11371 return type;
11373 /* e) For Intel Xeon Phi native and offload compilation, if the
11374 resulting characteristic data type is 8-bit or 16-bit integer
11375 data type, the characteristic data type is int. */
11376 /* Well, we don't handle Xeon Phi yet. */
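/* For instance, under rule a) the characteristic data type of

	#pragma omp declare simd
	double foo (int x);

   is double; were the return type void, rule b) would instead pick
   the type of the first non-uniform, non-linear parameter.  */

/* Mangle the name of the SIMD clone in NODE described by CLONE_INFO:
   "_ZGV", the target's ISA letter, 'M' or 'N' for masked/unmasked,
   the simdlen, one letter per argument kind, an underscore and the
   original assembler name.  Return NULL_TREE if an identically
   mangled clone already exists.  */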
11379 static tree
11380 simd_clone_mangle (struct cgraph_node *node,
11381 struct cgraph_simd_clone *clone_info)
11383 char vecsize_mangle = clone_info->vecsize_mangle;
11384 char mask = clone_info->inbranch ? 'M' : 'N';
11385 unsigned int simdlen = clone_info->simdlen;
11386 unsigned int n;
11387 pretty_printer pp;
11389 gcc_assert (vecsize_mangle && simdlen);
11391 pp_string (&pp, "_ZGV");
11392 pp_character (&pp, vecsize_mangle);
11393 pp_character (&pp, mask);
11394 pp_decimal_int (&pp, simdlen);
11396 for (n = 0; n < clone_info->nargs; ++n)
11398 struct cgraph_simd_clone_arg arg = clone_info->args[n];
11400 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
11401 pp_character (&pp, 'u');
11402 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
11404 gcc_assert (arg.linear_step != 0);
11405 pp_character (&pp, 'l');
11406 if (arg.linear_step > 1)
11407 pp_unsigned_wide_integer (&pp, arg.linear_step);
11408 else if (arg.linear_step < 0)
11410 pp_character (&pp, 'n');
11411 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
11412 arg.linear_step));
11415 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
11417 pp_character (&pp, 's');
11418 pp_unsigned_wide_integer (&pp, arg.linear_step);
11420 else
11421 pp_character (&pp, 'v');
11422 if (arg.alignment)
11424 pp_character (&pp, 'a');
11425 pp_decimal_int (&pp, arg.alignment);
11429 pp_underscore (&pp);
11430 pp_string (&pp,
11431 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
11432 const char *str = pp_formatted_text (&pp);
11434 /* If there already is a SIMD clone with the same mangled name, don't
11435 add another one. This can happen e.g. for
11436 #pragma omp declare simd
11437 #pragma omp declare simd simdlen(8)
11438 int foo (int, int);
11439 if the simdlen is assumed to be 8 for the first one, etc. */
11440 for (struct cgraph_node *clone = node->simd_clones; clone;
11441 clone = clone->simdclone->next_clone)
11442 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
11443 str) == 0)
11444 return NULL_TREE;
11446 return get_identifier (str);
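/* E.g. for the declaration in the previous example (simdlen 8,
   notinbranch, uniform arg 0, linear arg 1 with step 4, aligned
   vector arg 2) the loop above emits "u", "l4" and "va16", giving a
   name of the form "_ZGV?N8ul4va16_foo" -- '?' standing in for the
   target's vecsize_mangle letter (an illustrative sketch, not real
   target output).  */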
11449 /* Create a simd clone of OLD_NODE and return it. */
11451 static struct cgraph_node *
11452 simd_clone_create (struct cgraph_node *old_node)
11454 struct cgraph_node *new_node;
11455 if (old_node->definition)
11457 if (!old_node->has_gimple_body_p ())
11458 return NULL;
11459 old_node->get_body ();
11460 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
11461 false, NULL, NULL,
11462 "simdclone");
11464 else
11466 tree old_decl = old_node->decl;
11467 tree new_decl = copy_node (old_node->decl);
11468 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
11469 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
11470 SET_DECL_RTL (new_decl, NULL);
11471 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
11472 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
11473 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
11474 symtab->call_cgraph_insertion_hooks (new_node);
11476 if (new_node == NULL)
11477 return new_node;
11479 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
11481 /* The function cgraph_function_versioning () will force the new
11482 symbol local. Undo this, and inherit external visibility from
11483 the old node. */
11484 new_node->local.local = old_node->local.local;
11485 new_node->externally_visible = old_node->externally_visible;
11487 return new_node;
11490 /* Adjust the return type of the given function to its appropriate
11491 vector counterpart. Returns a simd array to be used throughout the
11492 function as a return value. */
11494 static tree
11495 simd_clone_adjust_return_type (struct cgraph_node *node)
11497 tree fndecl = node->decl;
11498 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
11499 unsigned int veclen;
11500 tree t;
11502 /* Adjust the function return type. */
11503 if (orig_rettype == void_type_node)
11504 return NULL_TREE;
11505 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
11506 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
11507 || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
11508 veclen = node->simdclone->vecsize_int;
11509 else
11510 veclen = node->simdclone->vecsize_float;
11511 veclen /= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))));
11512 if (veclen > node->simdclone->simdlen)
11513 veclen = node->simdclone->simdlen;
11514 if (veclen == node->simdclone->simdlen)
11515 TREE_TYPE (TREE_TYPE (fndecl))
11516 = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)),
11517 node->simdclone->simdlen);
11518 else
11520 t = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)), veclen);
11521 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
11522 TREE_TYPE (TREE_TYPE (fndecl)) = t;
11524 if (!node->definition)
11525 return NULL_TREE;
11527 t = DECL_RESULT (fndecl);
11528 /* Adjust the DECL_RESULT. */
11529 gcc_assert (TREE_TYPE (t) != void_type_node);
11530 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
11531 relayout_decl (t);
11533 tree atype = build_array_type_nelts (orig_rettype,
11534 node->simdclone->simdlen);
11535 if (veclen != node->simdclone->simdlen)
11536 return build1 (VIEW_CONVERT_EXPR, atype, t);
11538 /* Set up a SIMD array to use as the return value. */
11539 tree retval = create_tmp_var_raw (atype, "retval");
11540 gimple_add_tmp_var (retval);
11541 return retval;
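/* A worked instance of the veclen computation above (numbers assumed
   for illustration): for a float return type with simdlen 8 and a
   128-bit vecsize_float, veclen is 128/32 == 4, so the return type
   becomes an array of 8/4 == 2 vector(4) float elements and the
   DECL_RESULT is accessed through a VIEW_CONVERT_EXPR; with a 256-bit
   vecsize_float, veclen == simdlen == 8 and a plain vector(8) float
   is used.  */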
11544 /* Each vector argument has a corresponding array to be used locally
11545 as part of the eventual loop. Create such a temporary array and
11546 return it.
11548 PREFIX is the prefix to be used for the temporary.
11550 TYPE is the inner element type.
11552 SIMDLEN is the number of elements. */
11554 static tree
11555 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
11557 tree atype = build_array_type_nelts (type, simdlen);
11558 tree avar = create_tmp_var_raw (atype, prefix);
11559 gimple_add_tmp_var (avar);
11560 return avar;
11563 /* Modify the function argument types to their corresponding vector
11564 counterparts if appropriate. Also, create one array for each simd
11565 argument to be used locally when using the function arguments as
11566 part of the loop.
11568 NODE is the function whose arguments are to be adjusted.
11570 Returns an adjustment vector describing how the argument
11571 types were adjusted. */
11573 static ipa_parm_adjustment_vec
11574 simd_clone_adjust_argument_types (struct cgraph_node *node)
11576 vec<tree> args;
11577 ipa_parm_adjustment_vec adjustments;
11579 if (node->definition)
11580 args = ipa_get_vector_of_formal_parms (node->decl);
11581 else
11582 args = simd_clone_vector_of_formal_parm_types (node->decl);
11583 adjustments.create (args.length ());
11584 unsigned i, j, veclen;
11585 struct ipa_parm_adjustment adj;
11586 for (i = 0; i < node->simdclone->nargs; ++i)
11588 memset (&adj, 0, sizeof (adj));
11589 tree parm = args[i];
11590 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
11591 adj.base_index = i;
11592 adj.base = parm;
11594 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
11595 node->simdclone->args[i].orig_type = parm_type;
11597 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
11599 /* No adjustment necessary for scalar arguments. */
11600 adj.op = IPA_PARM_OP_COPY;
11602 else
11604 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
11605 veclen = node->simdclone->vecsize_int;
11606 else
11607 veclen = node->simdclone->vecsize_float;
11608 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
11609 if (veclen > node->simdclone->simdlen)
11610 veclen = node->simdclone->simdlen;
11611 adj.arg_prefix = "simd";
11612 adj.type = build_vector_type (parm_type, veclen);
11613 node->simdclone->args[i].vector_type = adj.type;
11614 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11616 adjustments.safe_push (adj);
11617 if (j == veclen)
11619 memset (&adj, 0, sizeof (adj));
11620 adj.op = IPA_PARM_OP_NEW;
11621 adj.arg_prefix = "simd";
11622 adj.base_index = i;
11623 adj.type = node->simdclone->args[i].vector_type;
11627 if (node->definition)
11628 node->simdclone->args[i].simd_array
11629 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
11630 parm_type, node->simdclone->simdlen);
11632 adjustments.safe_push (adj);
11635 if (node->simdclone->inbranch)
11637 tree base_type
11638 = simd_clone_compute_base_data_type (node->simdclone->origin,
11639 node->simdclone);
11641 memset (&adj, 0, sizeof (adj));
11642 adj.op = IPA_PARM_OP_NEW;
11643 adj.arg_prefix = "mask";
11645 adj.base_index = i;
11646 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
11647 veclen = node->simdclone->vecsize_int;
11648 else
11649 veclen = node->simdclone->vecsize_float;
11650 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
11651 if (veclen > node->simdclone->simdlen)
11652 veclen = node->simdclone->simdlen;
11653 adj.type = build_vector_type (base_type, veclen);
11654 adjustments.safe_push (adj);
11656 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11657 adjustments.safe_push (adj);
11659 /* We have previously allocated one extra entry for the mask. Use
11660 it and fill it. */
11661 struct cgraph_simd_clone *sc = node->simdclone;
11662 sc->nargs++;
11663 if (node->definition)
11665 sc->args[i].orig_arg
11666 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
11667 sc->args[i].simd_array
11668 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
11670 sc->args[i].orig_type = base_type;
11671 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
11674 if (node->definition)
11675 ipa_modify_formal_parameters (node->decl, adjustments);
11676 else
11678 tree new_arg_types = NULL_TREE, new_reversed;
11679 bool last_parm_void = false;
11680 if (args.length () > 0 && args.last () == void_type_node)
11681 last_parm_void = true;
11683 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
11684 j = adjustments.length ();
11685 for (i = 0; i < j; i++)
11687 struct ipa_parm_adjustment *adj = &adjustments[i];
11688 tree ptype;
11689 if (adj->op == IPA_PARM_OP_COPY)
11690 ptype = args[adj->base_index];
11691 else
11692 ptype = adj->type;
11693 new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
11695 new_reversed = nreverse (new_arg_types);
11696 if (last_parm_void)
11698 if (new_reversed)
11699 TREE_CHAIN (new_arg_types) = void_list_node;
11700 else
11701 new_reversed = void_list_node;
11704 tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
11705 TYPE_ARG_TYPES (new_type) = new_reversed;
11706 TREE_TYPE (node->decl) = new_type;
11708 adjustments.release ();
11710 args.release ();
11711 return adjustments;
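/* Continuing the illustrative numbers: an "int x" vector argument
   with simdlen 8 and a 128-bit vecsize_int gives veclen 4, so the
   scalar parameter is rewritten into two vector(4) int parameters
   (the second one pushed as IPA_PARM_OP_NEW above), and for a
   definition a local simd array "x[8]" is created to hold the
   argument inside the clone's loop.  */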
11714 /* Initialize and copy the function arguments in NODE to their
11715 corresponding local simd arrays. Returns a fresh gimple_seq with
11716 the instruction sequence generated. */
11718 static gimple_seq
11719 simd_clone_init_simd_arrays (struct cgraph_node *node,
11720 ipa_parm_adjustment_vec adjustments)
11722 gimple_seq seq = NULL;
11723 unsigned i = 0, j = 0, k;
11725 for (tree arg = DECL_ARGUMENTS (node->decl);
11726 arg;
11727 arg = DECL_CHAIN (arg), i++, j++)
11729 if (adjustments[j].op == IPA_PARM_OP_COPY)
11730 continue;
11732 node->simdclone->args[i].vector_arg = arg;
11734 tree array = node->simdclone->args[i].simd_array;
11735 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
11737 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11738 tree ptr = build_fold_addr_expr (array);
11739 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11740 build_int_cst (ptype, 0));
11741 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11742 gimplify_and_add (t, &seq);
11744 else
11746 unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
11747 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11748 for (k = 0; k < node->simdclone->simdlen; k += simdlen)
11750 tree ptr = build_fold_addr_expr (array);
11751 int elemsize;
11752 if (k)
11754 arg = DECL_CHAIN (arg);
11755 j++;
11757 elemsize
11758 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
11759 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11760 build_int_cst (ptype, k * elemsize));
11761 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11762 gimplify_and_add (t, &seq);
11766 return seq;
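/* In the running example the sequence built above amounts to two
   vector stores into the argument's simd array, roughly (names
   illustrative, offsets in bytes):

	MEM[(vector(4) int *)&x_array + 0B] = x.0;
	MEM[(vector(4) int *)&x_array + 16B] = x.1;
*/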
11769 /* Callback info for ipa_simd_modify_stmt_ops below. */
11771 struct modify_stmt_info {
11772 ipa_parm_adjustment_vec adjustments;
11773 gimple stmt;
11774 /* True if the parent statement was modified by
11775 ipa_simd_modify_stmt_ops. */
11776 bool modified;
11779 /* Callback for walk_gimple_op.
11781 Adjust operands from a given statement as specified in the
11782 adjustments vector in the callback data. */
11784 static tree
11785 ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
11787 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
11788 struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
11789 tree *orig_tp = tp;
11790 if (TREE_CODE (*tp) == ADDR_EXPR)
11791 tp = &TREE_OPERAND (*tp, 0);
11792 struct ipa_parm_adjustment *cand = NULL;
11793 if (TREE_CODE (*tp) == PARM_DECL)
11794 cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
11795 else
11797 if (TYPE_P (*tp))
11798 *walk_subtrees = 0;
11801 tree repl = NULL_TREE;
11802 if (cand)
11803 repl = unshare_expr (cand->new_decl);
11804 else
11806 if (tp != orig_tp)
11808 *walk_subtrees = 0;
11809 bool modified = info->modified;
11810 info->modified = false;
11811 walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
11812 if (!info->modified)
11814 info->modified = modified;
11815 return NULL_TREE;
11817 info->modified = modified;
11818 repl = *tp;
11820 else
11821 return NULL_TREE;
11824 if (tp != orig_tp)
11826 repl = build_fold_addr_expr (repl);
11827 gimple stmt;
11828 if (is_gimple_debug (info->stmt))
11830 tree vexpr = make_node (DEBUG_EXPR_DECL);
11831 stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
11832 DECL_ARTIFICIAL (vexpr) = 1;
11833 TREE_TYPE (vexpr) = TREE_TYPE (repl);
11834 DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
11835 repl = vexpr;
11837 else
11839 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl),
11840 NULL), repl);
11841 repl = gimple_assign_lhs (stmt);
11843 gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
11844 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11845 *orig_tp = repl;
11847 else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
11849 tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
11850 *tp = vce;
11852 else
11853 *tp = repl;
11855 info->modified = true;
11856 return NULL_TREE;
11859 /* Traverse the function body and perform all modifications as
11860 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
11861 modified such that the replacement/reduction value will now be an
11862 offset into the corresponding simd_array.
11864 This function will replace all function argument uses with their
11865 corresponding simd array elements, and adjust the return values
11866 accordingly. */
11868 static void
11869 ipa_simd_modify_function_body (struct cgraph_node *node,
11870 ipa_parm_adjustment_vec adjustments,
11871 tree retval_array, tree iter)
11873 basic_block bb;
11874 unsigned int i, j, l;
11876 /* Re-use the adjustments array, but this time use it to replace
11877 every function argument use to an offset into the corresponding
11878 simd_array. */
11879 for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
11881 if (!node->simdclone->args[i].vector_arg)
11882 continue;
11884 tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
11885 tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
11886 adjustments[j].new_decl
11887 = build4 (ARRAY_REF,
11888 basetype,
11889 node->simdclone->args[i].simd_array,
11890 iter,
11891 NULL_TREE, NULL_TREE);
11892 if (adjustments[j].op == IPA_PARM_OP_NONE
11893 && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
11894 j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
11897 l = adjustments.length ();
11898 for (i = 1; i < num_ssa_names; i++)
11900 tree name = ssa_name (i);
11901 if (name
11902 && SSA_NAME_VAR (name)
11903 && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
11905 for (j = 0; j < l; j++)
11906 if (SSA_NAME_VAR (name) == adjustments[j].base
11907 && adjustments[j].new_decl)
11909 tree base_var;
11910 if (adjustments[j].new_ssa_base == NULL_TREE)
11912 base_var
11913 = copy_var_decl (adjustments[j].base,
11914 DECL_NAME (adjustments[j].base),
11915 TREE_TYPE (adjustments[j].base));
11916 adjustments[j].new_ssa_base = base_var;
11918 else
11919 base_var = adjustments[j].new_ssa_base;
11920 if (SSA_NAME_IS_DEFAULT_DEF (name))
11922 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11923 gimple_stmt_iterator gsi = gsi_after_labels (bb);
11924 tree new_decl = unshare_expr (adjustments[j].new_decl);
11925 set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
11926 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
11927 SSA_NAME_IS_DEFAULT_DEF (name) = 0;
11928 gimple stmt = gimple_build_assign (name, new_decl);
11929 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11931 else
11932 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
11937 struct modify_stmt_info info;
11938 info.adjustments = adjustments;
11940 FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
11942 gimple_stmt_iterator gsi;
11944 gsi = gsi_start_bb (bb);
11945 while (!gsi_end_p (gsi))
11947 gimple stmt = gsi_stmt (gsi);
11948 info.stmt = stmt;
11949 struct walk_stmt_info wi;
11951 memset (&wi, 0, sizeof (wi));
11952 info.modified = false;
11953 wi.info = &info;
11954 walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
11956 if (gimple_return return_stmt = dyn_cast <gimple_return> (stmt))
11958 tree retval = gimple_return_retval (return_stmt);
11959 if (!retval)
11961 gsi_remove (&gsi, true);
11962 continue;
11965 /* Replace `return foo' with `retval_array[iter] = foo'. */
11966 tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
11967 retval_array, iter, NULL, NULL);
11968 stmt = gimple_build_assign (ref, retval);
11969 gsi_replace (&gsi, stmt, true);
11970 info.modified = true;
11973 if (info.modified)
11975 update_stmt (stmt);
11976 if (maybe_clean_eh_stmt (stmt))
11977 gimple_purge_dead_eh_edges (gimple_bb (stmt));
11979 gsi_next (&gsi);
11984 /* Adjust the argument types in NODE to their appropriate vector
11985 counterparts. */
11987 static void
11988 simd_clone_adjust (struct cgraph_node *node)
11990 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
11992 targetm.simd_clone.adjust (node);
11994 tree retval = simd_clone_adjust_return_type (node);
11995 ipa_parm_adjustment_vec adjustments
11996 = simd_clone_adjust_argument_types (node);
11998 push_gimplify_context ();
12000 gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
12002 /* Adjust all uses of vector arguments accordingly. Adjust all
12003 return values accordingly. */
12004 tree iter = create_tmp_var (unsigned_type_node, "iter");
12005 tree iter1 = make_ssa_name (iter, NULL);
12006 tree iter2 = make_ssa_name (iter, NULL);
12007 ipa_simd_modify_function_body (node, adjustments, retval, iter1);
12009 /* Initialize the iteration variable. */
12010 basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12011 basic_block body_bb = split_block_after_labels (entry_bb)->dest;
12012 gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
12013 /* Insert the SIMD array and iv initialization at function
12014 entry. */
12015 gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
12017 pop_gimplify_context (NULL);
12019 /* Create a new BB right before the original exit BB, to hold the
12020 iteration increment and the condition/branch. */
12021 basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
12022 basic_block incr_bb = create_empty_bb (orig_exit);
12023 add_bb_to_loop (incr_bb, body_bb->loop_father);
12024 /* The successor of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with
12025 no flags set. Set it now to be a FALLTHRU_EDGE. */
12026 gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
12027 EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
12028 for (unsigned i = 0;
12029 i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
12031 edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
12032 redirect_edge_succ (e, incr_bb);
12034 edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
12035 e->probability = REG_BR_PROB_BASE;
12036 gsi = gsi_last_bb (incr_bb);
12037 gimple g = gimple_build_assign_with_ops (PLUS_EXPR, iter2, iter1,
12038 build_int_cst (unsigned_type_node,
12039 1));
12040 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12042 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
12043 struct loop *loop = alloc_loop ();
12044 cfun->has_force_vectorize_loops = true;
12045 loop->safelen = node->simdclone->simdlen;
12046 loop->force_vectorize = true;
12047 loop->header = body_bb;
12049 /* Branch around the body if the mask applies. */
12050 if (node->simdclone->inbranch)
12052 gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
12053 tree mask_array
12054 = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
12055 tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)), NULL);
12056 tree aref = build4 (ARRAY_REF,
12057 TREE_TYPE (TREE_TYPE (mask_array)),
12058 mask_array, iter1,
12059 NULL, NULL);
12060 g = gimple_build_assign (mask, aref);
12061 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12062 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
12063 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
12065 aref = build1 (VIEW_CONVERT_EXPR,
12066 build_nonstandard_integer_type (bitsize, 0), mask);
12067 mask = make_ssa_name (TREE_TYPE (aref), NULL);
12068 g = gimple_build_assign (mask, aref);
12069 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12072 g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
12073 NULL, NULL);
12074 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12075 make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
12076 FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
12079 /* Generate the condition. */
12080 g = gimple_build_cond (LT_EXPR,
12081 iter2,
12082 build_int_cst (unsigned_type_node,
12083 node->simdclone->simdlen),
12084 NULL, NULL);
12085 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12086 e = split_block (incr_bb, gsi_stmt (gsi));
12087 basic_block latch_bb = e->dest;
12088 basic_block new_exit_bb;
12089 new_exit_bb = split_block (latch_bb, NULL)->dest;
12090 loop->latch = latch_bb;
12092 redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
12094 make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
12095 /* The successor of incr_bb is already pointing to latch_bb; just
12096 change the flags.
12097 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
12098 FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
12100 gimple_phi phi = create_phi_node (iter1, body_bb);
12101 edge preheader_edge = find_edge (entry_bb, body_bb);
12102 edge latch_edge = single_succ_edge (latch_bb);
12103 add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
12104 UNKNOWN_LOCATION);
12105 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12107 /* Generate the new return. */
12108 gsi = gsi_last_bb (new_exit_bb);
12109 if (retval
12110 && TREE_CODE (retval) == VIEW_CONVERT_EXPR
12111 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
12112 retval = TREE_OPERAND (retval, 0);
12113 else if (retval)
12115 retval = build1 (VIEW_CONVERT_EXPR,
12116 TREE_TYPE (TREE_TYPE (node->decl)),
12117 retval);
12118 retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
12119 false, GSI_CONTINUE_LINKING);
12121 g = gimple_build_return (retval);
12122 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12124 /* Handle aligned clauses by replacing default defs of the aligned
12125 uniform args with __builtin_assume_aligned (arg_N(D), alignment)
12126 lhs. Handle linear by adding PHIs. */
12127 for (unsigned i = 0; i < node->simdclone->nargs; i++)
12128 if (node->simdclone->args[i].alignment
12129 && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
12130 && (node->simdclone->args[i].alignment
12131 & (node->simdclone->args[i].alignment - 1)) == 0
12132 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
12133 == POINTER_TYPE)
12135 unsigned int alignment = node->simdclone->args[i].alignment;
12136 tree orig_arg = node->simdclone->args[i].orig_arg;
12137 tree def = ssa_default_def (cfun, orig_arg);
12138 if (def && !has_zero_uses (def))
12140 tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
12141 gimple_seq seq = NULL;
12142 bool need_cvt = false;
12143 gimple_call call
12144 = gimple_build_call (fn, 2, def, size_int (alignment));
12145 g = call;
12146 if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
12147 ptr_type_node))
12148 need_cvt = true;
12149 tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg, NULL);
12150 gimple_call_set_lhs (g, t);
12151 gimple_seq_add_stmt_without_update (&seq, g);
12152 if (need_cvt)
12154 t = make_ssa_name (orig_arg, NULL);
12155 g = gimple_build_assign_with_ops (NOP_EXPR, t,
12156 gimple_call_lhs (g),
12157 NULL_TREE);
12158 gimple_seq_add_stmt_without_update (&seq, g);
12160 gsi_insert_seq_on_edge_immediate
12161 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
12163 entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12164 int freq = compute_call_stmt_bb_frequency (current_function_decl,
12165 entry_bb);
12166 node->create_edge (cgraph_node::get_create (fn),
12167 call, entry_bb->count, freq);
12169 imm_use_iterator iter;
12170 use_operand_p use_p;
12171 gimple use_stmt;
12172 tree repl = gimple_get_lhs (g);
12173 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12174 if (is_gimple_debug (use_stmt) || use_stmt == call)
12175 continue;
12176 else
12177 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12178 SET_USE (use_p, repl);
12181 else if (node->simdclone->args[i].arg_type
12182 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
12184 tree orig_arg = node->simdclone->args[i].orig_arg;
12185 tree def = ssa_default_def (cfun, orig_arg);
12186 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12187 || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
12188 if (def && !has_zero_uses (def))
12190 iter1 = make_ssa_name (orig_arg, NULL);
12191 iter2 = make_ssa_name (orig_arg, NULL);
12192 phi = create_phi_node (iter1, body_bb);
12193 add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
12194 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12195 enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12196 ? PLUS_EXPR : POINTER_PLUS_EXPR;
12197 tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12198 ? TREE_TYPE (orig_arg) : sizetype;
12199 tree addcst
12200 = build_int_cst (addtype, node->simdclone->args[i].linear_step);
12201 g = gimple_build_assign_with_ops (code, iter2, iter1, addcst);
12202 gsi = gsi_last_bb (incr_bb);
12203 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
12205 imm_use_iterator iter;
12206 use_operand_p use_p;
12207 gimple use_stmt;
12208 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12209 if (use_stmt == phi)
12210 continue;
12211 else
12212 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12213 SET_USE (use_p, iter1);
12217 calculate_dominance_info (CDI_DOMINATORS);
12218 add_loop (loop, loop->header->loop_father);
12219 update_ssa (TODO_update_ssa);
12221 pop_cfun ();
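/* Schematically, for a function definition the clone built above has
   the shape (pseudo-GIMPLE; a sketch only):

	<entry>:  simd arrays initialized from the vector arguments
	<body>:   iter1 = PHI <0 (entry), iter2 (latch)>
	          ... original body, parm uses and returns rewritten
	              into simd-array accesses indexed by iter1 ...
	<incr>:   iter2 = iter1 + 1;
	          if (iter2 < simdlen) goto <body>; else goto <exit>;
	<exit>:   return retval;

   with the loop marked force_vectorize and safelen == simdlen so the
   vectorizer can collapse it back into straight-line vector code.  */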
12224 /* If the function in NODE is tagged as an elemental SIMD function,
12225 create the appropriate SIMD clones. */
12227 static void
12228 expand_simd_clones (struct cgraph_node *node)
12230 tree attr = lookup_attribute ("omp declare simd",
12231 DECL_ATTRIBUTES (node->decl));
12232 if (attr == NULL_TREE
12233 || node->global.inlined_to
12234 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
12235 return;
12237 /* Ignore
12238 #pragma omp declare simd
12239 extern int foo ();
12240 in C, where we don't know the argument types at all. */
12241 if (!node->definition
12242 && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
12243 return;
12247 /* Start with parsing the "omp declare simd" attribute(s). */
12248 bool inbranch_clause_specified;
12249 struct cgraph_simd_clone *clone_info
12250 = simd_clone_clauses_extract (node, TREE_VALUE (attr),
12251 &inbranch_clause_specified);
12252 if (clone_info == NULL)
12253 continue;
12255 int orig_simdlen = clone_info->simdlen;
12256 tree base_type = simd_clone_compute_base_data_type (node, clone_info);
12257 /* The target can return 0 (no simd clones should be created),
12258 1 (just one ISA of simd clones should be created) or a higher
12259 count of ISA variants. In the latter case, clone_info is initialized
12260 for the first ISA variant. */
12261 int count
12262 = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
12263 base_type, 0);
12264 if (count == 0)
12265 continue;
12267 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
12268 also create one inbranch and one !inbranch clone of it. */
12269 for (int i = 0; i < count * 2; i++)
12271 struct cgraph_simd_clone *clone = clone_info;
12272 if (inbranch_clause_specified && (i & 1) != 0)
12273 continue;
12275 if (i != 0)
12277 clone = simd_clone_struct_alloc (clone_info->nargs
12278 + ((i & 1) != 0));
12279 simd_clone_struct_copy (clone, clone_info);
12280 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
12281 and simd_clone_adjust_argument_types did to the first
12282 clone's info. */
12283 clone->nargs -= clone_info->inbranch;
12284 clone->simdlen = orig_simdlen;
12285 /* And call the target hook again to get the right ISA. */
12286 targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
12287 base_type,
12288 i / 2);
12289 if ((i & 1) != 0)
12290 clone->inbranch = 1;
12293 /* simd_clone_mangle might fail if such a clone has been created
12294 already. */
12295 tree id = simd_clone_mangle (node, clone);
12296 if (id == NULL_TREE)
12297 continue;
12299 /* Only when we are sure we want to create the clone do we actually
12300 clone the function (for definitions) or create another
12301 extern FUNCTION_DECL (for prototypes without definitions). */
12302 struct cgraph_node *n = simd_clone_create (node);
12303 if (n == NULL)
12304 continue;
12306 n->simdclone = clone;
12307 clone->origin = node;
12308 clone->next_clone = NULL;
12309 if (node->simd_clones == NULL)
12311 clone->prev_clone = n;
12312 node->simd_clones = n;
12314 else
12316 clone->prev_clone = node->simd_clones->simdclone->prev_clone;
12317 clone->prev_clone->simdclone->next_clone = n;
12318 node->simd_clones->simdclone->prev_clone = n;
12320 symtab->change_decl_assembler_name (n->decl, id);
12321 /* And finally adjust the return type, parameters and for
12322 definitions also function body. */
12323 if (node->definition)
12324 simd_clone_adjust (n);
12325 else
12327 simd_clone_adjust_return_type (n);
12328 simd_clone_adjust_argument_types (n);
12332 while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
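/* For example, given

	#pragma omp declare simd
	int foo (int x) { return x; }

   where neither inbranch nor notinbranch is specified, the loop above
   creates both an unmasked and a masked clone per ISA variant the
   target requests -- names of the form "_ZGV?N?v_foo" and
   "_ZGV?M?v_foo" (an illustrative sketch; letters and simdlen are
   chosen by the target hook).  */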
12335 /* Entry point for IPA simd clone creation pass. */
12337 static unsigned int
12338 ipa_omp_simd_clone (void)
12340 struct cgraph_node *node;
12341 FOR_EACH_FUNCTION (node)
12342 expand_simd_clones (node);
12343 return 0;
12346 namespace {
12348 const pass_data pass_data_omp_simd_clone =
12350 SIMPLE_IPA_PASS, /* type */
12351 "simdclone", /* name */
12352 OPTGROUP_NONE, /* optinfo_flags */
12353 TV_NONE, /* tv_id */
12354 ( PROP_ssa | PROP_cfg ), /* properties_required */
12355 0, /* properties_provided */
12356 0, /* properties_destroyed */
12357 0, /* todo_flags_start */
12358 0, /* todo_flags_finish */
12361 class pass_omp_simd_clone : public simple_ipa_opt_pass
12363 public:
12364 pass_omp_simd_clone(gcc::context *ctxt)
12365 : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
12368 /* opt_pass methods: */
12369 virtual bool gate (function *);
12370 virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
12373 bool
12374 pass_omp_simd_clone::gate (function *)
12376 return ((flag_openmp || flag_openmp_simd
12377 || flag_cilkplus
12378 || (in_lto_p && !flag_wpa))
12379 && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
12382 } // anon namespace
12384 simple_ipa_opt_pass *
12385 make_pass_omp_simd_clone (gcc::context *ctxt)
12387 return new pass_omp_simd_clone (ctxt);
12390 #include "gt-omp-low.h"