gcc/omp-low.c (official-gcc.git, commit "Introduce gimple_omp_parallel")
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005-2014 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "stringpool.h"
30 #include "stor-layout.h"
31 #include "rtl.h"
32 #include "basic-block.h"
33 #include "tree-ssa-alias.h"
34 #include "internal-fn.h"
35 #include "gimple-fold.h"
36 #include "gimple-expr.h"
37 #include "is-a.h"
38 #include "gimple.h"
39 #include "gimplify.h"
40 #include "gimple-iterator.h"
41 #include "gimplify-me.h"
42 #include "gimple-walk.h"
43 #include "tree-iterator.h"
44 #include "tree-inline.h"
45 #include "langhooks.h"
46 #include "diagnostic-core.h"
47 #include "gimple-ssa.h"
48 #include "cgraph.h"
49 #include "tree-cfg.h"
50 #include "tree-phinodes.h"
51 #include "ssa-iterators.h"
52 #include "tree-ssanames.h"
53 #include "tree-into-ssa.h"
54 #include "expr.h"
55 #include "tree-dfa.h"
56 #include "tree-ssa.h"
57 #include "flags.h"
58 #include "function.h"
59 #include "expr.h"
60 #include "tree-pass.h"
61 #include "except.h"
62 #include "splay-tree.h"
63 #include "optabs.h"
64 #include "cfgloop.h"
65 #include "target.h"
66 #include "omp-low.h"
67 #include "gimple-low.h"
68 #include "tree-cfgcleanup.h"
69 #include "pretty-print.h"
70 #include "ipa-prop.h"
71 #include "tree-nested.h"
72 #include "tree-eh.h"
73 #include "cilk.h"
76 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
77 phases. The first phase scans the function looking for OMP statements
78 and then for variables that must be replaced to satisfy data sharing
79 clauses. The second phase expands code for the constructs, as well as
80 re-gimplifying things when variables have been replaced with complex
81 expressions.
83 Final code generation is done by pass_expand_omp. The flowgraph is
84 scanned for parallel regions which are then moved to a new
85 function, to be invoked by the thread library. */
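/* Illustrative sketch (editorial, not part of the original source): a
   minimal region such as

     #pragma omp parallel shared (x)
       x++;

   is outlined into roughly

     struct .omp_data_s { int *x; };

     static void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
     {
       (*.omp_data_i->x)++;
     }

     .omp_data_o.x = &x;
     __builtin_GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   The generated names and the exact GOMP_parallel signature shown here
   are assumptions for exposition.  */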
87 /* Parallel region information. Every parallel and workshare
88 directive is enclosed between two markers, the OMP_* directive
89 and a corresponding OMP_RETURN statement. */
91 struct omp_region
93 /* The enclosing region. */
94 struct omp_region *outer;
96 /* First child region. */
97 struct omp_region *inner;
99 /* Next peer region. */
100 struct omp_region *next;
102 /* Block containing the omp directive as its last stmt. */
103 basic_block entry;
105 /* Block containing the OMP_RETURN as its last stmt. */
106 basic_block exit;
108 /* Block containing the OMP_CONTINUE as its last stmt. */
109 basic_block cont;
111 /* If this is a combined parallel+workshare region, this is a list
112 of additional arguments needed by the combined parallel+workshare
113 library call. */
114 vec<tree, va_gc> *ws_args;
116 /* The code for the omp directive of this region. */
117 enum gimple_code type;
119 /* Schedule kind, only used for OMP_FOR type regions. */
120 enum omp_clause_schedule_kind sched_kind;
122 /* True if this is a combined parallel+workshare region. */
123 bool is_combined_parallel;
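/* Editorial example: for the nest

     #pragma omp parallel      <- region A (GIMPLE_OMP_PARALLEL)
       #pragma omp for         <- region B (GIMPLE_OMP_FOR)
       for (...) ...

   region A's `inner' points to B and B's `outer' points back to A;
   sibling regions at the same nesting level are chained through
   `next'.  */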
126 /* Context structure. Used to store information about each parallel
127 directive in the code. */
129 typedef struct omp_context
131 /* This field must be at the beginning, as we do "inheritance": Some
132 callback functions for tree-inline.c (e.g., omp_copy_decl)
133 receive a copy_body_data pointer that is up-casted to an
134 omp_context pointer. */
135 copy_body_data cb;
137 /* The tree of contexts corresponding to the encountered constructs. */
138 struct omp_context *outer;
139 gimple stmt;
141 /* Map variables to fields in a structure that allows communication
142 between sending and receiving threads. */
143 splay_tree field_map;
144 tree record_type;
145 tree sender_decl;
146 tree receiver_decl;
148 /* These are used just by task contexts, if task firstprivate fn is
149 needed. srecord_type is used to communicate from the thread
150 that encountered the task construct to task firstprivate fn,
151 record_type is allocated by GOMP_task, initialized by task firstprivate
152 fn and passed to the task body fn. */
153 splay_tree sfield_map;
154 tree srecord_type;
156 /* A chain of variables to add to the top-level block surrounding the
157 construct. In the case of a parallel, this is in the child function. */
158 tree block_vars;
160 /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
161 barriers should jump during the omplower pass. */
162 tree cancel_label;
164 /* What to do with variables with implicitly determined sharing
165 attributes. */
166 enum omp_clause_default_kind default_kind;
168 /* Nesting depth of this context. Used to beautify error messages re
169 invalid gotos. The outermost ctx is depth 1, with depth 0 being
170 reserved for the main body of the function. */
171 int depth;
173 /* True if this parallel directive is nested within another. */
174 bool is_nested;
176 /* True if this construct can be cancelled. */
177 bool cancellable;
178 } omp_context;
181 struct omp_for_data_loop
183 tree v, n1, n2, step;
184 enum tree_code cond_code;
187 /* A structure describing the main elements of a parallel loop. */
189 struct omp_for_data
191 struct omp_for_data_loop loop;
192 tree chunk_size;
193 gimple_omp_for for_stmt;
194 tree pre, iter_type;
195 int collapse;
196 bool have_nowait, have_ordered;
197 enum omp_clause_schedule_kind sched_kind;
198 struct omp_for_data_loop *loops;
202 static splay_tree all_contexts;
203 static int taskreg_nesting_level;
204 static int target_nesting_level;
205 static struct omp_region *root_omp_region;
206 static bitmap task_shared_vars;
207 static vec<omp_context *> taskreg_contexts;
209 static void scan_omp (gimple_seq *, omp_context *);
210 static tree scan_omp_1_op (tree *, int *, void *);
212 #define WALK_SUBSTMTS \
213 case GIMPLE_BIND: \
214 case GIMPLE_TRY: \
215 case GIMPLE_CATCH: \
216 case GIMPLE_EH_FILTER: \
217 case GIMPLE_TRANSACTION: \
218 /* The sub-statements for these should be walked. */ \
219 *handled_ops_p = false; \
220 break;
222 /* Convenience function for calling scan_omp_1_op on tree operands. */
224 static inline tree
225 scan_omp_op (tree *tp, omp_context *ctx)
227 struct walk_stmt_info wi;
229 memset (&wi, 0, sizeof (wi));
230 wi.info = ctx;
231 wi.want_locations = true;
233 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
236 static void lower_omp (gimple_seq *, omp_context *);
237 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
238 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
240 /* Find an OpenMP clause of type KIND within CLAUSES. */
242 tree
243 find_omp_clause (tree clauses, enum omp_clause_code kind)
245 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
246 if (OMP_CLAUSE_CODE (clauses) == kind)
247 return clauses;
249 return NULL_TREE;
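/* Usage sketch (editorial): callers chase the clause chain and then read
   the clause-specific operand, e.g.

     tree c = find_omp_clause (gimple_omp_parallel_clauses (stmt),
                               OMP_CLAUSE_NUM_THREADS);
     if (c)
       nthreads = OMP_CLAUSE_NUM_THREADS_EXPR (c);
*/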
252 /* Return true if CTX is for an omp parallel. */
254 static inline bool
255 is_parallel_ctx (omp_context *ctx)
257 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
261 /* Return true if CTX is for an omp task. */
263 static inline bool
264 is_task_ctx (omp_context *ctx)
266 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
270 /* Return true if CTX is for an omp parallel or omp task. */
272 static inline bool
273 is_taskreg_ctx (omp_context *ctx)
275 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
276 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
280 /* Return true if REGION is a combined parallel+workshare region. */
282 static inline bool
283 is_combined_parallel (struct omp_region *region)
285 return region->is_combined_parallel;
289 /* Extract the header elements of parallel loop FOR_STMT and store
290 them into *FD. */
292 static void
293 extract_omp_for_data (gimple_omp_for for_stmt, struct omp_for_data *fd,
294 struct omp_for_data_loop *loops)
296 tree t, var, *collapse_iter, *collapse_count;
297 tree count = NULL_TREE, iter_type = long_integer_type_node;
298 struct omp_for_data_loop *loop;
299 int i;
300 struct omp_for_data_loop dummy_loop;
301 location_t loc = gimple_location (for_stmt);
302 bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
303 bool distribute = gimple_omp_for_kind (for_stmt)
304 == GF_OMP_FOR_KIND_DISTRIBUTE;
306 fd->for_stmt = for_stmt;
307 fd->pre = NULL;
308 fd->collapse = gimple_omp_for_collapse (for_stmt);
309 if (fd->collapse > 1)
310 fd->loops = loops;
311 else
312 fd->loops = &fd->loop;
314 fd->have_nowait = distribute || simd;
315 fd->have_ordered = false;
316 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
317 fd->chunk_size = NULL_TREE;
318 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
319 fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
320 collapse_iter = NULL;
321 collapse_count = NULL;
323 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
324 switch (OMP_CLAUSE_CODE (t))
326 case OMP_CLAUSE_NOWAIT:
327 fd->have_nowait = true;
328 break;
329 case OMP_CLAUSE_ORDERED:
330 fd->have_ordered = true;
331 break;
332 case OMP_CLAUSE_SCHEDULE:
333 gcc_assert (!distribute);
334 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
335 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
336 break;
337 case OMP_CLAUSE_DIST_SCHEDULE:
338 gcc_assert (distribute);
339 fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
340 break;
341 case OMP_CLAUSE_COLLAPSE:
342 if (fd->collapse > 1)
344 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
345 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
347 break;
348 default:
349 break;
352 /* FIXME: for now map schedule(auto) to schedule(static).
353 There should be analysis to determine whether all iterations
354 are approximately the same amount of work (then schedule(static)
355 is best) or if it varies (then schedule(dynamic,N) is better). */
356 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
358 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
359 gcc_assert (fd->chunk_size == NULL);
361 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
362 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
363 gcc_assert (fd->chunk_size == NULL);
364 else if (fd->chunk_size == NULL)
366 /* We only need to compute a default chunk size for ordered
367 static loops and dynamic loops. */
368 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
369 || fd->have_ordered)
370 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
371 ? integer_zero_node : integer_one_node;
374 for (i = 0; i < fd->collapse; i++)
376 if (fd->collapse == 1)
377 loop = &fd->loop;
378 else if (loops != NULL)
379 loop = loops + i;
380 else
381 loop = &dummy_loop;
383 loop->v = gimple_omp_for_index (for_stmt, i);
384 gcc_assert (SSA_VAR_P (loop->v));
385 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
386 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
387 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
388 loop->n1 = gimple_omp_for_initial (for_stmt, i);
390 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
391 loop->n2 = gimple_omp_for_final (for_stmt, i);
392 switch (loop->cond_code)
394 case LT_EXPR:
395 case GT_EXPR:
396 break;
397 case NE_EXPR:
398 gcc_assert (gimple_omp_for_kind (for_stmt)
399 == GF_OMP_FOR_KIND_CILKSIMD
400 || (gimple_omp_for_kind (for_stmt)
401 == GF_OMP_FOR_KIND_CILKFOR));
402 break;
403 case LE_EXPR:
404 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
405 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
406 else
407 loop->n2 = fold_build2_loc (loc,
408 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
409 build_int_cst (TREE_TYPE (loop->n2), 1));
410 loop->cond_code = LT_EXPR;
411 break;
412 case GE_EXPR:
413 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
414 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
415 else
416 loop->n2 = fold_build2_loc (loc,
417 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
418 build_int_cst (TREE_TYPE (loop->n2), 1));
419 loop->cond_code = GT_EXPR;
420 break;
421 default:
422 gcc_unreachable ();
425 t = gimple_omp_for_incr (for_stmt, i);
426 gcc_assert (TREE_OPERAND (t, 0) == var);
427 switch (TREE_CODE (t))
429 case PLUS_EXPR:
430 loop->step = TREE_OPERAND (t, 1);
431 break;
432 case POINTER_PLUS_EXPR:
433 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
434 break;
435 case MINUS_EXPR:
436 loop->step = TREE_OPERAND (t, 1);
437 loop->step = fold_build1_loc (loc,
438 NEGATE_EXPR, TREE_TYPE (loop->step),
439 loop->step);
440 break;
441 default:
442 gcc_unreachable ();
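/* Editorial note: the two switches above canonicalize the loop header.
   E.g. "for (i = 0; i <= n; i++)" becomes n2 = n + 1 with cond_code
   LT_EXPR, and "for (i = n; i >= 0; i -= 4)" becomes n2 = -1 with
   cond_code GT_EXPR and step folded to -4.  */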
445 if (simd
446 || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
447 && !fd->have_ordered))
449 if (fd->collapse == 1)
450 iter_type = TREE_TYPE (loop->v);
451 else if (i == 0
452 || TYPE_PRECISION (iter_type)
453 < TYPE_PRECISION (TREE_TYPE (loop->v)))
454 iter_type
455 = build_nonstandard_integer_type
456 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
458 else if (iter_type != long_long_unsigned_type_node)
460 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
461 iter_type = long_long_unsigned_type_node;
462 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
463 && TYPE_PRECISION (TREE_TYPE (loop->v))
464 >= TYPE_PRECISION (iter_type))
466 tree n;
468 if (loop->cond_code == LT_EXPR)
469 n = fold_build2_loc (loc,
470 PLUS_EXPR, TREE_TYPE (loop->v),
471 loop->n2, loop->step);
472 else
473 n = loop->n1;
474 if (TREE_CODE (n) != INTEGER_CST
475 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
476 iter_type = long_long_unsigned_type_node;
478 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
479 > TYPE_PRECISION (iter_type))
481 tree n1, n2;
483 if (loop->cond_code == LT_EXPR)
485 n1 = loop->n1;
486 n2 = fold_build2_loc (loc,
487 PLUS_EXPR, TREE_TYPE (loop->v),
488 loop->n2, loop->step);
490 else
492 n1 = fold_build2_loc (loc,
493 MINUS_EXPR, TREE_TYPE (loop->v),
494 loop->n2, loop->step);
495 n2 = loop->n1;
497 if (TREE_CODE (n1) != INTEGER_CST
498 || TREE_CODE (n2) != INTEGER_CST
499 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
500 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
501 iter_type = long_long_unsigned_type_node;
505 if (collapse_count && *collapse_count == NULL)
507 t = fold_binary (loop->cond_code, boolean_type_node,
508 fold_convert (TREE_TYPE (loop->v), loop->n1),
509 fold_convert (TREE_TYPE (loop->v), loop->n2));
510 if (t && integer_zerop (t))
511 count = build_zero_cst (long_long_unsigned_type_node);
512 else if ((i == 0 || count != NULL_TREE)
513 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
514 && TREE_CONSTANT (loop->n1)
515 && TREE_CONSTANT (loop->n2)
516 && TREE_CODE (loop->step) == INTEGER_CST)
518 tree itype = TREE_TYPE (loop->v);
520 if (POINTER_TYPE_P (itype))
521 itype = signed_type_for (itype);
522 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
523 t = fold_build2_loc (loc,
524 PLUS_EXPR, itype,
525 fold_convert_loc (loc, itype, loop->step), t);
526 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
527 fold_convert_loc (loc, itype, loop->n2));
528 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
529 fold_convert_loc (loc, itype, loop->n1));
530 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
531 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
532 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
533 fold_build1_loc (loc, NEGATE_EXPR, itype,
534 fold_convert_loc (loc, itype,
535 loop->step)));
536 else
537 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
538 fold_convert_loc (loc, itype, loop->step));
539 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
540 if (count != NULL_TREE)
541 count = fold_build2_loc (loc,
542 MULT_EXPR, long_long_unsigned_type_node,
543 count, t);
544 else
545 count = t;
546 if (TREE_CODE (count) != INTEGER_CST)
547 count = NULL_TREE;
549 else if (count && !integer_zerop (count))
550 count = NULL_TREE;
554 if (count
555 && !simd
556 && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
557 || fd->have_ordered))
559 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
560 iter_type = long_long_unsigned_type_node;
561 else
562 iter_type = long_integer_type_node;
564 else if (collapse_iter && *collapse_iter != NULL)
565 iter_type = TREE_TYPE (*collapse_iter);
566 fd->iter_type = iter_type;
567 if (collapse_iter && *collapse_iter == NULL)
568 *collapse_iter = create_tmp_var (iter_type, ".iter");
569 if (collapse_count && *collapse_count == NULL)
571 if (count)
572 *collapse_count = fold_convert_loc (loc, iter_type, count);
573 else
574 *collapse_count = create_tmp_var (iter_type, ".count");
577 if (fd->collapse > 1)
579 fd->loop.v = *collapse_iter;
580 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
581 fd->loop.n2 = *collapse_count;
582 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
583 fd->loop.cond_code = LT_EXPR;
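/* Editorial sketch: with collapse(2), e.g.

     #pragma omp for collapse (2)
     for (i = 0; i < 4; i++)
       for (j = 0; j < 5; j++)
         ...

   the code above folds count = 4 * 5 = 20 and describes the loop pair
   as a single logical space 0 <= .iter < .count with step 1, from
   which the original indices are recomputed during expansion.  */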
588 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
589 is the immediate dominator of PAR_ENTRY_BB, return true if there
590 are no data dependencies that would prevent expanding the parallel
591 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
593 When expanding a combined parallel+workshare region, the call to
594 the child function may need additional arguments in the case of
595 GIMPLE_OMP_FOR regions. In some cases, these arguments are
596 computed out of variables passed in from the parent to the child
597 via 'struct .omp_data_s'. For instance:
599 #pragma omp parallel for schedule (guided, i * 4)
600 for (j ...)
602 Is lowered into:
604 # BLOCK 2 (PAR_ENTRY_BB)
605 .omp_data_o.i = i;
606 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
608 # BLOCK 3 (WS_ENTRY_BB)
609 .omp_data_i = &.omp_data_o;
610 D.1667 = .omp_data_i->i;
611 D.1598 = D.1667 * 4;
612 #pragma omp for schedule (guided, D.1598)
614 When we outline the parallel region, the call to the child function
615 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
616 that value is computed *after* the call site. So, in principle we
617 cannot do the transformation.
619 To see whether the code in WS_ENTRY_BB blocks the combined
620 parallel+workshare call, we collect all the variables used in the
621 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
622 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
623 call.
625 FIXME. If we had the SSA form built at this point, we could merely
626 hoist the code in block 3 into block 2 and be done with it. But at
627 this point we don't have dataflow information and though we could
628 hack something up here, it is really not worth the aggravation. */
630 static bool
631 workshare_safe_to_combine_p (basic_block ws_entry_bb)
633 struct omp_for_data fd;
634 gimple ws_stmt = last_stmt (ws_entry_bb);
636 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
637 return true;
639 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
641 extract_omp_for_data (as_a <gimple_omp_for> (ws_stmt), &fd, NULL);
643 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
644 return false;
645 if (fd.iter_type != long_integer_type_node)
646 return false;
648 /* FIXME. We give up too easily here. If any of these arguments
649 are not constants, they will likely involve variables that have
650 been mapped into fields of .omp_data_s for sharing with the child
651 function. With appropriate data flow, it would be possible to
652 see through this. */
653 if (!is_gimple_min_invariant (fd.loop.n1)
654 || !is_gimple_min_invariant (fd.loop.n2)
655 || !is_gimple_min_invariant (fd.loop.step)
656 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
657 return false;
659 return true;
663 /* Collect additional arguments needed to emit a combined
664 parallel+workshare call. WS_STMT is the workshare directive being
665 expanded. */
667 static vec<tree, va_gc> *
668 get_ws_args_for (gimple par_stmt, gimple ws_stmt)
670 tree t;
671 location_t loc = gimple_location (ws_stmt);
672 vec<tree, va_gc> *ws_args;
674 if (gimple_omp_for for_stmt = dyn_cast <gimple_omp_for> (ws_stmt))
676 struct omp_for_data fd;
677 tree n1, n2;
679 extract_omp_for_data (for_stmt, &fd, NULL);
680 n1 = fd.loop.n1;
681 n2 = fd.loop.n2;
683 if (gimple_omp_for_combined_into_p (for_stmt))
685 tree innerc
686 = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
687 OMP_CLAUSE__LOOPTEMP_);
688 gcc_assert (innerc);
689 n1 = OMP_CLAUSE_DECL (innerc);
690 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
691 OMP_CLAUSE__LOOPTEMP_);
692 gcc_assert (innerc);
693 n2 = OMP_CLAUSE_DECL (innerc);
696 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
698 t = fold_convert_loc (loc, long_integer_type_node, n1);
699 ws_args->quick_push (t);
701 t = fold_convert_loc (loc, long_integer_type_node, n2);
702 ws_args->quick_push (t);
704 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
705 ws_args->quick_push (t);
707 if (fd.chunk_size)
709 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
710 ws_args->quick_push (t);
713 return ws_args;
715 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
717 /* Number of sections is equal to the number of edges from the
718 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
719 the exit of the sections region. */
720 basic_block bb = single_succ (gimple_bb (ws_stmt));
721 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
722 vec_alloc (ws_args, 1);
723 ws_args->quick_push (t);
724 return ws_args;
727 gcc_unreachable ();
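/* Editorial sketch: for a combined "parallel for schedule (dynamic, 4)"
   the n1/n2/step/chunk values collected above become trailing arguments
   of a single combined libgomp call, roughly

     GOMP_parallel_loop_dynamic (foo._omp_fn.0, &.omp_data_o, 0,
                                 n1, n2, step, 4, 0);

   replacing the separate GOMP_parallel and GOMP_loop_dynamic_start
   calls; the exact runtime signature is an assumption here.  */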
731 /* Discover whether REGION is a combined parallel+workshare region. */
733 static void
734 determine_parallel_type (struct omp_region *region)
736 basic_block par_entry_bb, par_exit_bb;
737 basic_block ws_entry_bb, ws_exit_bb;
739 if (region == NULL || region->inner == NULL
740 || region->exit == NULL || region->inner->exit == NULL
741 || region->inner->cont == NULL)
742 return;
744 /* We only support parallel+for and parallel+sections. */
745 if (region->type != GIMPLE_OMP_PARALLEL
746 || (region->inner->type != GIMPLE_OMP_FOR
747 && region->inner->type != GIMPLE_OMP_SECTIONS))
748 return;
750 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
751 WS_EXIT_BB -> PAR_EXIT_BB. */
752 par_entry_bb = region->entry;
753 par_exit_bb = region->exit;
754 ws_entry_bb = region->inner->entry;
755 ws_exit_bb = region->inner->exit;
757 if (single_succ (par_entry_bb) == ws_entry_bb
758 && single_succ (ws_exit_bb) == par_exit_bb
759 && workshare_safe_to_combine_p (ws_entry_bb)
760 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
761 || (last_and_only_stmt (ws_entry_bb)
762 && last_and_only_stmt (par_exit_bb))))
764 gimple par_stmt = last_stmt (par_entry_bb);
765 gimple ws_stmt = last_stmt (ws_entry_bb);
767 if (region->inner->type == GIMPLE_OMP_FOR)
769 /* If this is a combined parallel loop, we need to determine
770 whether or not to use the combined library calls. There
771 are two cases where we do not apply the transformation:
772 static loops and any kind of ordered loop. In the first
773 case, we already open code the loop so there is no need
774 to do anything else. In the latter case, the combined
775 parallel loop call would still need extra synchronization
776 to implement ordered semantics, so there would not be any
777 gain in using the combined call. */
778 tree clauses = gimple_omp_for_clauses (ws_stmt);
779 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
780 if (c == NULL
781 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
782 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
784 region->is_combined_parallel = false;
785 region->inner->is_combined_parallel = false;
786 return;
790 region->is_combined_parallel = true;
791 region->inner->is_combined_parallel = true;
792 region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
797 /* Return true if EXPR is variable sized. */
799 static inline bool
800 is_variable_sized (const_tree expr)
802 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
805 /* Return true if DECL should be privatized by reference. */
807 static inline bool
808 is_reference (tree decl)
810 return lang_hooks.decls.omp_privatize_by_reference (decl);
813 /* Lookup variables in the decl or field splay trees. The "maybe" form
814 allows the variable not to have been entered; otherwise we assert
815 that the variable must have been entered. */
817 static inline tree
818 lookup_decl (tree var, omp_context *ctx)
820 tree *n = ctx->cb.decl_map->get (var);
821 return *n;
824 static inline tree
825 maybe_lookup_decl (const_tree var, omp_context *ctx)
827 tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
828 return n ? *n : NULL_TREE;
831 static inline tree
832 lookup_field (tree var, omp_context *ctx)
834 splay_tree_node n;
835 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
836 return (tree) n->value;
839 static inline tree
840 lookup_sfield (tree var, omp_context *ctx)
842 splay_tree_node n;
843 n = splay_tree_lookup (ctx->sfield_map
844 ? ctx->sfield_map : ctx->field_map,
845 (splay_tree_key) var);
846 return (tree) n->value;
849 static inline tree
850 maybe_lookup_field (tree var, omp_context *ctx)
852 splay_tree_node n;
853 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
854 return n ? (tree) n->value : NULL_TREE;
857 /* Return true if DECL should be copied by pointer. SHARED_CTX is
858 the parallel context if DECL is to be shared. */
860 static bool
861 use_pointer_for_field (tree decl, omp_context *shared_ctx)
863 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
864 return true;
866 /* We can only use copy-in/copy-out semantics for shared variables
867 when we know the value is not accessible from an outer scope. */
868 if (shared_ctx)
870 /* ??? Trivially accessible from anywhere. But why would we even
871 be passing an address in this case? Should we simply assert
872 this to be false, or should we have a cleanup pass that removes
873 these from the list of mappings? */
874 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
875 return true;
877 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
878 without analyzing the expression whether or not its location
879 is accessible to anyone else. In the case of nested parallel
880 regions it certainly may be. */
881 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
882 return true;
884 /* Do not use copy-in/copy-out for variables that have their
885 address taken. */
886 if (TREE_ADDRESSABLE (decl))
887 return true;
889 /* lower_send_shared_vars only uses copy-in, but not copy-out
890 for these. */
891 if (TREE_READONLY (decl)
892 || ((TREE_CODE (decl) == RESULT_DECL
893 || TREE_CODE (decl) == PARM_DECL)
894 && DECL_BY_REFERENCE (decl)))
895 return false;
897 /* Disallow copy-in/out in nested parallel if
898 decl is shared in outer parallel, otherwise
899 each thread could store the shared variable
900 in its own copy-in location, making the
901 variable no longer really shared. */
902 if (shared_ctx->is_nested)
904 omp_context *up;
906 for (up = shared_ctx->outer; up; up = up->outer)
907 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
908 break;
910 if (up)
912 tree c;
914 for (c = gimple_omp_taskreg_clauses (up->stmt);
915 c; c = OMP_CLAUSE_CHAIN (c))
916 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
917 && OMP_CLAUSE_DECL (c) == decl)
918 break;
920 if (c)
921 goto maybe_mark_addressable_and_ret;
925 /* For tasks avoid using copy-in/out. As tasks can be
926 deferred or executed in a different thread, when GOMP_task
927 returns, the task hasn't necessarily terminated. */
928 if (is_task_ctx (shared_ctx))
930 tree outer;
931 maybe_mark_addressable_and_ret:
932 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
933 if (is_gimple_reg (outer))
935 /* Taking address of OUTER in lower_send_shared_vars
936 might need regimplification of everything that uses the
937 variable. */
938 if (!task_shared_vars)
939 task_shared_vars = BITMAP_ALLOC (NULL);
940 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
941 TREE_ADDRESSABLE (outer) = 1;
943 return true;
947 return false;
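/* Editorial examples of the rules above: "int a[10]" (an aggregate) is
   always passed by pointer; an addressable scalar, or a scalar shared
   into a task or nested parallel where another thread may still read
   it, is passed by pointer as well; a TREE_READONLY scalar only needs
   copy-in, so the function returns false for it.  */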
950 /* Construct a new automatic decl similar to VAR. */
952 static tree
953 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
955 tree copy = copy_var_decl (var, name, type);
957 DECL_CONTEXT (copy) = current_function_decl;
958 DECL_CHAIN (copy) = ctx->block_vars;
959 ctx->block_vars = copy;
961 return copy;
964 static tree
965 omp_copy_decl_1 (tree var, omp_context *ctx)
967 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
970 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
971 as appropriate. */
972 static tree
973 omp_build_component_ref (tree obj, tree field)
975 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
976 if (TREE_THIS_VOLATILE (field))
977 TREE_THIS_VOLATILE (ret) |= 1;
978 if (TREE_READONLY (field))
979 TREE_READONLY (ret) |= 1;
980 return ret;
983 /* Build tree nodes to access the field for VAR on the receiver side. */
985 static tree
986 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
988 tree x, field = lookup_field (var, ctx);
990 /* If the receiver record type was remapped in the child function,
991 remap the field into the new record type. */
992 x = maybe_lookup_field (field, ctx);
993 if (x != NULL)
994 field = x;
996 x = build_simple_mem_ref (ctx->receiver_decl);
997 x = omp_build_component_ref (x, field);
998 if (by_ref)
999 x = build_simple_mem_ref (x);
1001 return x;
1004 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
1005 of a parallel, this is a component reference; for workshare constructs
1006 this is some variable. */
1008 static tree
1009 build_outer_var_ref (tree var, omp_context *ctx)
1011 tree x;
1013 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
1014 x = var;
1015 else if (is_variable_sized (var))
1017 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
1018 x = build_outer_var_ref (x, ctx);
1019 x = build_simple_mem_ref (x);
1021 else if (is_taskreg_ctx (ctx))
1023 bool by_ref = use_pointer_for_field (var, NULL);
1024 x = build_receiver_ref (var, by_ref, ctx);
1026 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
1027 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
1029 /* #pragma omp simd isn't a worksharing construct, and can reference even
1030 private vars in its linear etc. clauses. */
1031 x = NULL_TREE;
1032 if (ctx->outer && is_taskreg_ctx (ctx))
1033 x = lookup_decl (var, ctx->outer);
1034 else if (ctx->outer)
1035 x = maybe_lookup_decl_in_outer_ctx (var, ctx);
1036 if (x == NULL_TREE)
1037 x = var;
1039 else if (ctx->outer)
1040 x = lookup_decl (var, ctx->outer);
1041 else if (is_reference (var))
1042 /* This can happen with orphaned constructs. If var is reference, it is
1043 possible it is shared and as such valid. */
1044 x = var;
1045 else
1046 gcc_unreachable ();
1048 if (is_reference (var))
1049 x = build_simple_mem_ref (x);
1051 return x;
1054 /* Build tree nodes to access the field for VAR on the sender side. */
1056 static tree
1057 build_sender_ref (tree var, omp_context *ctx)
1059 tree field = lookup_sfield (var, ctx);
1060 return omp_build_component_ref (ctx->sender_decl, field);
1063 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
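/* Editorial note on MASK (inferred from the code below): bit 0 installs
   the field in CTX->record_type and field_map (receiver side), bit 1
   installs it in CTX->srecord_type and sfield_map (sender side, used by
   task constructs), and bit 2 selects the double-indirection layout
   used for mapped array sections.  Most callers pass 3 for both.  */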
1065 static void
1066 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
1068 tree field, type, sfield = NULL_TREE;
1070 gcc_assert ((mask & 1) == 0
1071 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
1072 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
1073 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
1075 type = TREE_TYPE (var);
1076 if (mask & 4)
1078 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1079 type = build_pointer_type (build_pointer_type (type));
1081 else if (by_ref)
1082 type = build_pointer_type (type);
1083 else if ((mask & 3) == 1 && is_reference (var))
1084 type = TREE_TYPE (type);
1086 field = build_decl (DECL_SOURCE_LOCATION (var),
1087 FIELD_DECL, DECL_NAME (var), type);
1089 /* Remember what variable this field was created for. This does have a
1090 side effect of making dwarf2out ignore this member, so for helpful
1091 debugging we clear it later in delete_omp_context. */
1092 DECL_ABSTRACT_ORIGIN (field) = var;
1093 if (type == TREE_TYPE (var))
1095 DECL_ALIGN (field) = DECL_ALIGN (var);
1096 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
1097 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
1099 else
1100 DECL_ALIGN (field) = TYPE_ALIGN (type);
1102 if ((mask & 3) == 3)
1104 insert_field_into_struct (ctx->record_type, field);
1105 if (ctx->srecord_type)
1107 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1108 FIELD_DECL, DECL_NAME (var), type);
1109 DECL_ABSTRACT_ORIGIN (sfield) = var;
1110 DECL_ALIGN (sfield) = DECL_ALIGN (field);
1111 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
1112 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
1113 insert_field_into_struct (ctx->srecord_type, sfield);
1116 else
1118 if (ctx->srecord_type == NULL_TREE)
1120 tree t;
1122 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1123 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1124 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1126 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1127 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1128 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1129 insert_field_into_struct (ctx->srecord_type, sfield);
1130 splay_tree_insert (ctx->sfield_map,
1131 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1132 (splay_tree_value) sfield);
1135 sfield = field;
1136 insert_field_into_struct ((mask & 1) ? ctx->record_type
1137 : ctx->srecord_type, field);
1140 if (mask & 1)
1141 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1142 (splay_tree_value) field);
1143 if ((mask & 2) && ctx->sfield_map)
1144 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1145 (splay_tree_value) sfield);
1148 static tree
1149 install_var_local (tree var, omp_context *ctx)
1151 tree new_var = omp_copy_decl_1 (var, ctx);
1152 insert_decl_map (&ctx->cb, var, new_var);
1153 return new_var;
1156 /* Adjust the replacement for DECL in CTX for the new context. This means
1157 copying the DECL_VALUE_EXPR, and fixing up the type. */
1159 static void
1160 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1162 tree new_decl, size;
1164 new_decl = lookup_decl (decl, ctx);
1166 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1168 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1169 && DECL_HAS_VALUE_EXPR_P (decl))
1171 tree ve = DECL_VALUE_EXPR (decl);
1172 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1173 SET_DECL_VALUE_EXPR (new_decl, ve);
1174 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1177 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1179 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1180 if (size == error_mark_node)
1181 size = TYPE_SIZE (TREE_TYPE (new_decl));
1182 DECL_SIZE (new_decl) = size;
1184 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1185 if (size == error_mark_node)
1186 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1187 DECL_SIZE_UNIT (new_decl) = size;
1191 /* The callback for remap_decl. Search all containing contexts for a
1192 mapping of the variable; this avoids having to duplicate the splay
1193 tree ahead of time. We know a mapping doesn't already exist in the
1194 given context. Create new mappings to implement default semantics. */
1196 static tree
1197 omp_copy_decl (tree var, copy_body_data *cb)
1199 omp_context *ctx = (omp_context *) cb;
1200 tree new_var;
1202 if (TREE_CODE (var) == LABEL_DECL)
1204 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1205 DECL_CONTEXT (new_var) = current_function_decl;
1206 insert_decl_map (&ctx->cb, var, new_var);
1207 return new_var;
1210 while (!is_taskreg_ctx (ctx))
1212 ctx = ctx->outer;
1213 if (ctx == NULL)
1214 return var;
1215 new_var = maybe_lookup_decl (var, ctx);
1216 if (new_var)
1217 return new_var;
1220 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1221 return var;
1223 return error_mark_node;
1227 /* Debugging dumps for parallel regions. */
1228 void dump_omp_region (FILE *, struct omp_region *, int);
1229 void debug_omp_region (struct omp_region *);
1230 void debug_all_omp_regions (void);
1232 /* Dump the parallel region tree rooted at REGION. */
1234 void
1235 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1237 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1238 gimple_code_name[region->type]);
1240 if (region->inner)
1241 dump_omp_region (file, region->inner, indent + 4);
1243 if (region->cont)
1245 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1246 region->cont->index);
1249 if (region->exit)
1250 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1251 region->exit->index);
1252 else
1253 fprintf (file, "%*s[no exit marker]\n", indent, "");
1255 if (region->next)
1256 dump_omp_region (file, region->next, indent);
1259 DEBUG_FUNCTION void
1260 debug_omp_region (struct omp_region *region)
1262 dump_omp_region (stderr, region, 0);
1265 DEBUG_FUNCTION void
1266 debug_all_omp_regions (void)
1268 dump_omp_region (stderr, root_omp_region, 0);
1272 /* Create a new parallel region of type TYPE starting at BB inside region PARENT. */
1274 static struct omp_region *
1275 new_omp_region (basic_block bb, enum gimple_code type,
1276 struct omp_region *parent)
1278 struct omp_region *region = XCNEW (struct omp_region);
1280 region->outer = parent;
1281 region->entry = bb;
1282 region->type = type;
1284 if (parent)
1286 /* This is a nested region. Add it to the list of inner
1287 regions in PARENT. */
1288 region->next = parent->inner;
1289 parent->inner = region;
1291 else
1293 /* This is a toplevel region. Add it to the list of toplevel
1294 regions in ROOT_OMP_REGION. */
1295 region->next = root_omp_region;
1296 root_omp_region = region;
1299 return region;
1302 /* Release the memory associated with the region tree rooted at REGION. */
1304 static void
1305 free_omp_region_1 (struct omp_region *region)
1307 struct omp_region *i, *n;
1309 for (i = region->inner; i ; i = n)
1311 n = i->next;
1312 free_omp_region_1 (i);
1315 free (region);
1318 /* Release the memory for the entire omp region tree. */
1320 void
1321 free_omp_regions (void)
1323 struct omp_region *r, *n;
1324 for (r = root_omp_region; r ; r = n)
1326 n = r->next;
1327 free_omp_region_1 (r);
1329 root_omp_region = NULL;
1333 /* Create a new context, with OUTER_CTX being the surrounding context. */
1335 static omp_context *
1336 new_omp_context (gimple stmt, omp_context *outer_ctx)
1338 omp_context *ctx = XCNEW (omp_context);
1340 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1341 (splay_tree_value) ctx);
1342 ctx->stmt = stmt;
1344 if (outer_ctx)
1346 ctx->outer = outer_ctx;
1347 ctx->cb = outer_ctx->cb;
1348 ctx->cb.block = NULL;
1349 ctx->depth = outer_ctx->depth + 1;
1351 else
1353 ctx->cb.src_fn = current_function_decl;
1354 ctx->cb.dst_fn = current_function_decl;
1355 ctx->cb.src_node = cgraph_node::get (current_function_decl);
1356 gcc_checking_assert (ctx->cb.src_node);
1357 ctx->cb.dst_node = ctx->cb.src_node;
1358 ctx->cb.src_cfun = cfun;
1359 ctx->cb.copy_decl = omp_copy_decl;
1360 ctx->cb.eh_lp_nr = 0;
1361 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1362 ctx->depth = 1;
1365 ctx->cb.decl_map = new hash_map<tree, tree>;
1367 return ctx;
1370 static gimple_seq maybe_catch_exception (gimple_seq);
1372 /* Finalize task copyfn. */
1374 static void
1375 finalize_task_copyfn (gimple task_stmt)
1377 struct function *child_cfun;
1378 tree child_fn;
1379 gimple_seq seq = NULL, new_seq;
1380 gimple bind;
1382 child_fn = gimple_omp_task_copy_fn (task_stmt);
1383 if (child_fn == NULL_TREE)
1384 return;
1386 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1387 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
1389 push_cfun (child_cfun);
1390 bind = gimplify_body (child_fn, false);
1391 gimple_seq_add_stmt (&seq, bind);
1392 new_seq = maybe_catch_exception (seq);
1393 if (new_seq != seq)
1395 bind = gimple_build_bind (NULL, new_seq, NULL);
1396 seq = NULL;
1397 gimple_seq_add_stmt (&seq, bind);
1399 gimple_set_body (child_fn, seq);
1400 pop_cfun ();
1402 /* Inform the callgraph about the new function. */
1403 cgraph_node::add_new_function (child_fn, false);
1406 /* Destroy an omp_context's data structures. Called through the splay tree
1407 value delete callback. */
1409 static void
1410 delete_omp_context (splay_tree_value value)
1412 omp_context *ctx = (omp_context *) value;
1414 delete ctx->cb.decl_map;
1416 if (ctx->field_map)
1417 splay_tree_delete (ctx->field_map);
1418 if (ctx->sfield_map)
1419 splay_tree_delete (ctx->sfield_map);
1421 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1422 it produces corrupt debug information. */
1423 if (ctx->record_type)
1425 tree t;
1426 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1427 DECL_ABSTRACT_ORIGIN (t) = NULL;
1429 if (ctx->srecord_type)
1431 tree t;
1432 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1433 DECL_ABSTRACT_ORIGIN (t) = NULL;
1436 if (is_task_ctx (ctx))
1437 finalize_task_copyfn (ctx->stmt);
1439 XDELETE (ctx);
1442 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1443 context. */
1445 static void
1446 fixup_child_record_type (omp_context *ctx)
1448 tree f, type = ctx->record_type;
1450 /* ??? It isn't sufficient to just call remap_type here, because
1451 variably_modified_type_p doesn't work the way we expect for
1452 record types. Testing each field for whether it needs remapping
1453 and creating a new record by hand works, however. */
1454 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1455 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1456 break;
1457 if (f)
1459 tree name, new_fields = NULL;
1461 type = lang_hooks.types.make_type (RECORD_TYPE);
1462 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1463 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1464 TYPE_DECL, name, type);
1465 TYPE_NAME (type) = name;
1467 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1469 tree new_f = copy_node (f);
1470 DECL_CONTEXT (new_f) = type;
1471 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1472 DECL_CHAIN (new_f) = new_fields;
1473 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1474 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1475 &ctx->cb, NULL);
1476 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1477 &ctx->cb, NULL);
1478 new_fields = new_f;
1480 /* Arrange to be able to look up the receiver field
1481 given the sender field. */
1482 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1483 (splay_tree_value) new_f);
1485 TYPE_FIELDS (type) = nreverse (new_fields);
1486 layout_type (type);
1489 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1492 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1493 specified by CLAUSES. */
1495 static void
1496 scan_sharing_clauses (tree clauses, omp_context *ctx)
1498 tree c, decl;
1499 bool scan_array_reductions = false;
1501 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1503 bool by_ref;
1505 switch (OMP_CLAUSE_CODE (c))
1507 case OMP_CLAUSE_PRIVATE:
1508 decl = OMP_CLAUSE_DECL (c);
1509 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1510 goto do_private;
1511 else if (!is_variable_sized (decl))
1512 install_var_local (decl, ctx);
1513 break;
1515 case OMP_CLAUSE_SHARED:
1516 decl = OMP_CLAUSE_DECL (c);
1517 /* Ignore shared directives in teams construct. */
1518 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1520 /* Global variables don't need to be copied,
1521 the receiver side will use them directly. */
1522 tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
1523 if (is_global_var (odecl))
1524 break;
1525 insert_decl_map (&ctx->cb, decl, odecl);
1526 break;
1528 gcc_assert (is_taskreg_ctx (ctx));
1529 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1530 || !is_variable_sized (decl));
1531 /* Global variables don't need to be copied,
1532 the receiver side will use them directly. */
1533 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1534 break;
1535 by_ref = use_pointer_for_field (decl, ctx);
1536 if (! TREE_READONLY (decl)
1537 || TREE_ADDRESSABLE (decl)
1538 || by_ref
1539 || is_reference (decl))
1541 install_var_field (decl, by_ref, 3, ctx);
1542 install_var_local (decl, ctx);
1543 break;
1545 /* We don't need to copy const scalar vars back. */
1546 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1547 goto do_private;
1549 case OMP_CLAUSE_LASTPRIVATE:
1550 /* Let the corresponding firstprivate clause create
1551 the variable. */
1552 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1553 break;
1554 /* FALLTHRU */
1556 case OMP_CLAUSE_FIRSTPRIVATE:
1557 case OMP_CLAUSE_REDUCTION:
1558 case OMP_CLAUSE_LINEAR:
1559 decl = OMP_CLAUSE_DECL (c);
1560 do_private:
1561 if (is_variable_sized (decl))
1563 if (is_task_ctx (ctx))
1564 install_var_field (decl, false, 1, ctx);
1565 break;
1567 else if (is_taskreg_ctx (ctx))
1569 bool global
1570 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1571 by_ref = use_pointer_for_field (decl, NULL);
1573 if (is_task_ctx (ctx)
1574 && (global || by_ref || is_reference (decl)))
1576 install_var_field (decl, false, 1, ctx);
1577 if (!global)
1578 install_var_field (decl, by_ref, 2, ctx);
1580 else if (!global)
1581 install_var_field (decl, by_ref, 3, ctx);
1583 install_var_local (decl, ctx);
1584 break;
1586 case OMP_CLAUSE__LOOPTEMP_:
1587 gcc_assert (is_parallel_ctx (ctx));
1588 decl = OMP_CLAUSE_DECL (c);
1589 install_var_field (decl, false, 3, ctx);
1590 install_var_local (decl, ctx);
1591 break;
1593 case OMP_CLAUSE_COPYPRIVATE:
1594 case OMP_CLAUSE_COPYIN:
1595 decl = OMP_CLAUSE_DECL (c);
1596 by_ref = use_pointer_for_field (decl, NULL);
1597 install_var_field (decl, by_ref, 3, ctx);
1598 break;
1600 case OMP_CLAUSE_DEFAULT:
1601 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1602 break;
1604 case OMP_CLAUSE_FINAL:
1605 case OMP_CLAUSE_IF:
1606 case OMP_CLAUSE_NUM_THREADS:
1607 case OMP_CLAUSE_NUM_TEAMS:
1608 case OMP_CLAUSE_THREAD_LIMIT:
1609 case OMP_CLAUSE_DEVICE:
1610 case OMP_CLAUSE_SCHEDULE:
1611 case OMP_CLAUSE_DIST_SCHEDULE:
1612 case OMP_CLAUSE_DEPEND:
1613 case OMP_CLAUSE__CILK_FOR_COUNT_:
1614 if (ctx->outer)
1615 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1616 break;
1618 case OMP_CLAUSE_TO:
1619 case OMP_CLAUSE_FROM:
1620 case OMP_CLAUSE_MAP:
1621 if (ctx->outer)
1622 scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
1623 decl = OMP_CLAUSE_DECL (c);
1624 /* Global variables with "omp declare target" attribute
1625 don't need to be copied, the receiver side will use them
1626 directly. */
1627 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1628 && DECL_P (decl)
1629 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1630 && lookup_attribute ("omp declare target",
1631 DECL_ATTRIBUTES (decl)))
1632 break;
1633 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1634 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
1636 /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
1637 #pragma omp target data, there is nothing to map for
1638 those. */
1639 if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
1640 && !POINTER_TYPE_P (TREE_TYPE (decl)))
1641 break;
1643 if (DECL_P (decl))
1645 if (DECL_SIZE (decl)
1646 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1648 tree decl2 = DECL_VALUE_EXPR (decl);
1649 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1650 decl2 = TREE_OPERAND (decl2, 0);
1651 gcc_assert (DECL_P (decl2));
1652 install_var_field (decl2, true, 3, ctx);
1653 install_var_local (decl2, ctx);
1654 install_var_local (decl, ctx);
1656 else
1658 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1659 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
1660 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
1661 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1662 install_var_field (decl, true, 7, ctx);
1663 else
1664 install_var_field (decl, true, 3, ctx);
1665 if (gimple_omp_target_kind (ctx->stmt)
1666 == GF_OMP_TARGET_KIND_REGION)
1667 install_var_local (decl, ctx);
1670 else
1672 tree base = get_base_address (decl);
1673 tree nc = OMP_CLAUSE_CHAIN (c);
1674 if (DECL_P (base)
1675 && nc != NULL_TREE
1676 && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
1677 && OMP_CLAUSE_DECL (nc) == base
1678 && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
1679 && integer_zerop (OMP_CLAUSE_SIZE (nc)))
1681 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
1682 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
1684 else
1686 if (ctx->outer)
1688 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1689 decl = OMP_CLAUSE_DECL (c);
1691 gcc_assert (!splay_tree_lookup (ctx->field_map,
1692 (splay_tree_key) decl));
1693 tree field
1694 = build_decl (OMP_CLAUSE_LOCATION (c),
1695 FIELD_DECL, NULL_TREE, ptr_type_node);
1696 DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
1697 insert_field_into_struct (ctx->record_type, field);
1698 splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
1699 (splay_tree_value) field);
1702 break;
1704 case OMP_CLAUSE_NOWAIT:
1705 case OMP_CLAUSE_ORDERED:
1706 case OMP_CLAUSE_COLLAPSE:
1707 case OMP_CLAUSE_UNTIED:
1708 case OMP_CLAUSE_MERGEABLE:
1709 case OMP_CLAUSE_PROC_BIND:
1710 case OMP_CLAUSE_SAFELEN:
1711 break;
1713 case OMP_CLAUSE_ALIGNED:
1714 decl = OMP_CLAUSE_DECL (c);
1715 if (is_global_var (decl)
1716 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1717 install_var_local (decl, ctx);
1718 break;
1720 default:
1721 gcc_unreachable ();
1725 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1727 switch (OMP_CLAUSE_CODE (c))
1729 case OMP_CLAUSE_LASTPRIVATE:
1730 /* Let the corresponding firstprivate clause create
1731 the variable. */
1732 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1733 scan_array_reductions = true;
1734 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1735 break;
1736 /* FALLTHRU */
1738 case OMP_CLAUSE_PRIVATE:
1739 case OMP_CLAUSE_FIRSTPRIVATE:
1740 case OMP_CLAUSE_REDUCTION:
1741 case OMP_CLAUSE_LINEAR:
1742 decl = OMP_CLAUSE_DECL (c);
1743 if (is_variable_sized (decl))
1744 install_var_local (decl, ctx);
1745 fixup_remapped_decl (decl, ctx,
1746 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1747 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1748 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1749 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1750 scan_array_reductions = true;
1751 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1752 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
1753 scan_array_reductions = true;
1754 break;
1756 case OMP_CLAUSE_SHARED:
1757 /* Ignore shared directives in teams construct. */
1758 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1759 break;
1760 decl = OMP_CLAUSE_DECL (c);
1761 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1762 fixup_remapped_decl (decl, ctx, false);
1763 break;
1765 case OMP_CLAUSE_MAP:
1766 if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
1767 break;
1768 decl = OMP_CLAUSE_DECL (c);
1769 if (DECL_P (decl)
1770 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1771 && lookup_attribute ("omp declare target",
1772 DECL_ATTRIBUTES (decl)))
1773 break;
1774 if (DECL_P (decl))
1776 if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
1777 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
1778 && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
1780 tree new_decl = lookup_decl (decl, ctx);
1781 TREE_TYPE (new_decl)
1782 = remap_type (TREE_TYPE (decl), &ctx->cb);
1784 else if (DECL_SIZE (decl)
1785 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1787 tree decl2 = DECL_VALUE_EXPR (decl);
1788 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1789 decl2 = TREE_OPERAND (decl2, 0);
1790 gcc_assert (DECL_P (decl2));
1791 fixup_remapped_decl (decl2, ctx, false);
1792 fixup_remapped_decl (decl, ctx, true);
1794 else
1795 fixup_remapped_decl (decl, ctx, false);
1797 break;
1799 case OMP_CLAUSE_COPYPRIVATE:
1800 case OMP_CLAUSE_COPYIN:
1801 case OMP_CLAUSE_DEFAULT:
1802 case OMP_CLAUSE_IF:
1803 case OMP_CLAUSE_NUM_THREADS:
1804 case OMP_CLAUSE_NUM_TEAMS:
1805 case OMP_CLAUSE_THREAD_LIMIT:
1806 case OMP_CLAUSE_DEVICE:
1807 case OMP_CLAUSE_SCHEDULE:
1808 case OMP_CLAUSE_DIST_SCHEDULE:
1809 case OMP_CLAUSE_NOWAIT:
1810 case OMP_CLAUSE_ORDERED:
1811 case OMP_CLAUSE_COLLAPSE:
1812 case OMP_CLAUSE_UNTIED:
1813 case OMP_CLAUSE_FINAL:
1814 case OMP_CLAUSE_MERGEABLE:
1815 case OMP_CLAUSE_PROC_BIND:
1816 case OMP_CLAUSE_SAFELEN:
1817 case OMP_CLAUSE_ALIGNED:
1818 case OMP_CLAUSE_DEPEND:
1819 case OMP_CLAUSE__LOOPTEMP_:
1820 case OMP_CLAUSE_TO:
1821 case OMP_CLAUSE_FROM:
1822 case OMP_CLAUSE__CILK_FOR_COUNT_:
1823 break;
1825 default:
1826 gcc_unreachable ();
1830 if (scan_array_reductions)
1831 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1832 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1833 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1835 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1836 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1838 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1839 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1840 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1841 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1842 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
1843 scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
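/* Editorial sketch: for "#pragma omp parallel shared (a) firstprivate (b)"
   the first pass above installs record fields for A and B (so the child
   sees them through .omp_data_i) plus local replacement decls, and the
   second pass fixes up the remapped decls once all fields are known.  */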
1846 /* Create a new name for the omp child function. Returns an identifier. If
1847 IS_CILK_FOR is true then the suffix for the child function is
1848 "_cilk_for_fn". */
1850 static tree
1851 create_omp_child_function_name (bool task_copy, bool is_cilk_for)
1853 if (is_cilk_for)
1854 return clone_function_name (current_function_decl, "_cilk_for_fn");
1855 return clone_function_name (current_function_decl,
1856 task_copy ? "_omp_cpyfn" : "_omp_fn");
1859 /* Return the type of the induction variable, and of the __high and
1860 __low variables, for the _Cilk_for child function, based on TYPE. */
1862 static tree
1863 cilk_for_check_loop_diff_type (tree type)
1865 if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
1867 if (TYPE_UNSIGNED (type))
1868 return uint32_type_node;
1869 else
1870 return integer_type_node;
1872 else
1874 if (TYPE_UNSIGNED (type))
1875 return uint64_type_node;
1876 else
1877 return long_long_integer_type_node;
1881 /* Build a decl for the omp child function. It'll not contain a body
1882 yet, just the bare decl. */
1884 static void
1885 create_omp_child_function (omp_context *ctx, bool task_copy)
1887 tree decl, type, name, t;
1889 tree cilk_for_count
1890 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
1891 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1892 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
1893 tree cilk_var_type = NULL_TREE;
1895 name = create_omp_child_function_name (task_copy,
1896 cilk_for_count != NULL_TREE);
1897 if (task_copy)
1898 type = build_function_type_list (void_type_node, ptr_type_node,
1899 ptr_type_node, NULL_TREE);
1900 else if (cilk_for_count)
1902 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
1903 cilk_var_type = cilk_for_check_loop_diff_type (type);
1904 type = build_function_type_list (void_type_node, ptr_type_node,
1905 cilk_var_type, cilk_var_type, NULL_TREE);
1907 else
1908 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1910 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
1912 if (!task_copy)
1913 ctx->cb.dst_fn = decl;
1914 else
1915 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1917 TREE_STATIC (decl) = 1;
1918 TREE_USED (decl) = 1;
1919 DECL_ARTIFICIAL (decl) = 1;
1920 DECL_IGNORED_P (decl) = 0;
1921 TREE_PUBLIC (decl) = 0;
1922 DECL_UNINLINABLE (decl) = 1;
1923 DECL_EXTERNAL (decl) = 0;
1924 DECL_CONTEXT (decl) = NULL_TREE;
1925 DECL_INITIAL (decl) = make_node (BLOCK);
1926 bool target_p = false;
1927 if (lookup_attribute ("omp declare target",
1928 DECL_ATTRIBUTES (current_function_decl)))
1929 target_p = true;
1930 else
1932 omp_context *octx;
1933 for (octx = ctx; octx; octx = octx->outer)
1934 if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
1935 && gimple_omp_target_kind (octx->stmt)
1936 == GF_OMP_TARGET_KIND_REGION)
1938 target_p = true;
1939 break;
1942 if (target_p)
1943 DECL_ATTRIBUTES (decl)
1944 = tree_cons (get_identifier ("omp declare target"),
1945 NULL_TREE, DECL_ATTRIBUTES (decl));
1947 t = build_decl (DECL_SOURCE_LOCATION (decl),
1948 RESULT_DECL, NULL_TREE, void_type_node);
1949 DECL_ARTIFICIAL (t) = 1;
1950 DECL_IGNORED_P (t) = 1;
1951 DECL_CONTEXT (t) = decl;
1952 DECL_RESULT (decl) = t;
1954   /* _Cilk_for's child function requires two extra parameters called
1955      __low and __high that are set by the Cilk runtime when it calls this
1956      function.  */
1957 if (cilk_for_count)
1959 t = build_decl (DECL_SOURCE_LOCATION (decl),
1960 PARM_DECL, get_identifier ("__high"), cilk_var_type);
1961 DECL_ARTIFICIAL (t) = 1;
1962 DECL_NAMELESS (t) = 1;
1963 DECL_ARG_TYPE (t) = ptr_type_node;
1964 DECL_CONTEXT (t) = current_function_decl;
1965 TREE_USED (t) = 1;
1966 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1967 DECL_ARGUMENTS (decl) = t;
1969 t = build_decl (DECL_SOURCE_LOCATION (decl),
1970 PARM_DECL, get_identifier ("__low"), cilk_var_type);
1971 DECL_ARTIFICIAL (t) = 1;
1972 DECL_NAMELESS (t) = 1;
1973 DECL_ARG_TYPE (t) = ptr_type_node;
1974 DECL_CONTEXT (t) = current_function_decl;
1975 TREE_USED (t) = 1;
1976 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1977 DECL_ARGUMENTS (decl) = t;
1980 tree data_name = get_identifier (".omp_data_i");
1981 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
1982 ptr_type_node);
1983 DECL_ARTIFICIAL (t) = 1;
1984 DECL_NAMELESS (t) = 1;
1985 DECL_ARG_TYPE (t) = ptr_type_node;
1986 DECL_CONTEXT (t) = current_function_decl;
1987 TREE_USED (t) = 1;
1988 if (cilk_for_count)
1989 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1990 DECL_ARGUMENTS (decl) = t;
1991 if (!task_copy)
1992 ctx->receiver_decl = t;
1993 else
1995 t = build_decl (DECL_SOURCE_LOCATION (decl),
1996 PARM_DECL, get_identifier (".omp_data_o"),
1997 ptr_type_node);
1998 DECL_ARTIFICIAL (t) = 1;
1999 DECL_NAMELESS (t) = 1;
2000 DECL_ARG_TYPE (t) = ptr_type_node;
2001 DECL_CONTEXT (t) = current_function_decl;
2002 TREE_USED (t) = 1;
2003 TREE_ADDRESSABLE (t) = 1;
2004 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2005 DECL_ARGUMENTS (decl) = t;
2008   /* Allocate memory for the function structure.  The call to
2009      push_struct_function clobbers CFUN, so we need to restore
2010      it with pop_cfun afterward.  */
2011 push_struct_function (decl);
2012 cfun->function_end_locus = gimple_location (ctx->stmt);
2013 pop_cfun ();
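/* To make the above concrete, a minimal sketch (the field and function
   names here are illustrative; the real ones are generated as described
   above): for

     #pragma omp parallel shared(i)
       i++;

   the decl built here ends up, once pass_expand_omp moves the body in,
   with roughly the shape

     static void foo._omp_fn.0 (struct .omp_data_s.0 *.omp_data_i)
     {
       *.omp_data_i->i = *.omp_data_i->i + 1;
     }

   where .omp_data_i is the receiver parameter created above.  */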
2016 /* Callback for walk_gimple_seq.  Check whether a combined parallel
2017    contains an OMP_FOR for which gimple_omp_for_combined_into_p is true.  */
2019 static tree
2020 find_combined_for (gimple_stmt_iterator *gsi_p,
2021 bool *handled_ops_p,
2022 struct walk_stmt_info *wi)
2024 gimple stmt = gsi_stmt (*gsi_p);
2026 *handled_ops_p = true;
2027 switch (gimple_code (stmt))
2029 WALK_SUBSTMTS;
2031 case GIMPLE_OMP_FOR:
2032 if (gimple_omp_for_combined_into_p (stmt)
2033 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2035 wi->info = stmt;
2036 return integer_zero_node;
2038 break;
2039 default:
2040 break;
2042 return NULL;
2045 /* Scan an OpenMP parallel directive. */
2047 static void
2048 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2050 omp_context *ctx;
2051 tree name;
2052 gimple_omp_parallel stmt = as_a <gimple_omp_parallel> (gsi_stmt (*gsi));
2054 /* Ignore parallel directives with empty bodies, unless there
2055 are copyin clauses. */
2056 if (optimize > 0
2057 && empty_body_p (gimple_omp_body (stmt))
2058 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2059 OMP_CLAUSE_COPYIN) == NULL)
2061 gsi_replace (gsi, gimple_build_nop (), false);
2062 return;
2065 if (gimple_omp_parallel_combined_p (stmt))
2067 struct walk_stmt_info wi;
2069 memset (&wi, 0, sizeof (wi));
2070 wi.val_only = true;
2071 walk_gimple_seq (gimple_omp_body (stmt),
2072 find_combined_for, NULL, &wi);
2073 if (wi.info)
2075 gimple_omp_for for_stmt = as_a <gimple_omp_for> ((gimple) wi.info);
2076 struct omp_for_data fd;
2077 extract_omp_for_data (for_stmt, &fd, NULL);
2078 /* We need two temporaries with fd.loop.v type (istart/iend)
2079 and then (fd.collapse - 1) temporaries with the same
2080 type for count2 ... countN-1 vars if not constant. */
2081 size_t count = 2, i;
2082 tree type = fd.iter_type;
2083 if (fd.collapse > 1
2084 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2085 count += fd.collapse - 1;
2086 for (i = 0; i < count; i++)
2088 tree temp = create_tmp_var (type, NULL);
2089 tree c = build_omp_clause (UNKNOWN_LOCATION,
2090 OMP_CLAUSE__LOOPTEMP_);
2091 insert_decl_map (&outer_ctx->cb, temp, temp);
2092 OMP_CLAUSE_DECL (c) = temp;
2093 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2094 gimple_omp_parallel_set_clauses (stmt, c);
2099 ctx = new_omp_context (stmt, outer_ctx);
2100 taskreg_contexts.safe_push (ctx);
2101 if (taskreg_nesting_level > 1)
2102 ctx->is_nested = true;
2103 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2104 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2105 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2106 name = create_tmp_var_name (".omp_data_s");
2107 name = build_decl (gimple_location (stmt),
2108 TYPE_DECL, name, ctx->record_type);
2109 DECL_ARTIFICIAL (name) = 1;
2110 DECL_NAMELESS (name) = 1;
2111 TYPE_NAME (ctx->record_type) = name;
2112 create_omp_child_function (ctx, false);
2113 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2115 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2116 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2118 if (TYPE_FIELDS (ctx->record_type) == NULL)
2119 ctx->record_type = ctx->receiver_decl = NULL;
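/* For illustration, for a combined construct such as

     #pragma omp parallel for
     for (i = 0; i < n; i++) ...

   the code above attaches two OMP_CLAUSE__LOOPTEMP_ clauses to the
   parallel (temporaries of fd.iter_type for istart/iend), plus one more
   per additional collapsed loop when the overall bound is not constant,
   before the .omp_data_s record and the child function are created.  */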
2122 /* Scan an OpenMP task directive. */
2124 static void
2125 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2127 omp_context *ctx;
2128 tree name, t;
2129 gimple stmt = gsi_stmt (*gsi);
2131 /* Ignore task directives with empty bodies. */
2132 if (optimize > 0
2133 && empty_body_p (gimple_omp_body (stmt)))
2135 gsi_replace (gsi, gimple_build_nop (), false);
2136 return;
2139 ctx = new_omp_context (stmt, outer_ctx);
2140 taskreg_contexts.safe_push (ctx);
2141 if (taskreg_nesting_level > 1)
2142 ctx->is_nested = true;
2143 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2144 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2145 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2146 name = create_tmp_var_name (".omp_data_s");
2147 name = build_decl (gimple_location (stmt),
2148 TYPE_DECL, name, ctx->record_type);
2149 DECL_ARTIFICIAL (name) = 1;
2150 DECL_NAMELESS (name) = 1;
2151 TYPE_NAME (ctx->record_type) = name;
2152 create_omp_child_function (ctx, false);
2153 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2155 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2157 if (ctx->srecord_type)
2159 name = create_tmp_var_name (".omp_data_a");
2160 name = build_decl (gimple_location (stmt),
2161 TYPE_DECL, name, ctx->srecord_type);
2162 DECL_ARTIFICIAL (name) = 1;
2163 DECL_NAMELESS (name) = 1;
2164 TYPE_NAME (ctx->srecord_type) = name;
2165 create_omp_child_function (ctx, true);
2168 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2170 if (TYPE_FIELDS (ctx->record_type) == NULL)
2172 ctx->record_type = ctx->receiver_decl = NULL;
2173 t = build_int_cst (long_integer_type_node, 0);
2174 gimple_omp_task_set_arg_size (stmt, t);
2175 t = build_int_cst (long_integer_type_node, 1);
2176 gimple_omp_task_set_arg_align (stmt, t);
2181 /* If any decls have been made addressable during scan_omp,
2182 adjust their fields if needed, and layout record types
2183 of parallel/task constructs. */
2185 static void
2186 finish_taskreg_scan (omp_context *ctx)
2188 if (ctx->record_type == NULL_TREE)
2189 return;
2191   /* If any task_shared_vars were needed, check for each
2192      OMP_CLAUSE_SHARED clause on the GIMPLE_OMP_{PARALLEL,TASK}
2193      statement whether use_pointer_for_field has changed
2194      because of that.  If it did, update the field types now.  */
2195 if (task_shared_vars)
2197 tree c;
2199 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2200 c; c = OMP_CLAUSE_CHAIN (c))
2201 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2203 tree decl = OMP_CLAUSE_DECL (c);
2205 /* Global variables don't need to be copied,
2206 the receiver side will use them directly. */
2207 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2208 continue;
2209 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2210 || !use_pointer_for_field (decl, ctx))
2211 continue;
2212 tree field = lookup_field (decl, ctx);
2213 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2214 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2215 continue;
2216 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2217 TREE_THIS_VOLATILE (field) = 0;
2218 DECL_USER_ALIGN (field) = 0;
2219 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2220 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2221 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2222 if (ctx->srecord_type)
2224 tree sfield = lookup_sfield (decl, ctx);
2225 TREE_TYPE (sfield) = TREE_TYPE (field);
2226 TREE_THIS_VOLATILE (sfield) = 0;
2227 DECL_USER_ALIGN (sfield) = 0;
2228 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2229 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2230 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2235 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2237 layout_type (ctx->record_type);
2238 fixup_child_record_type (ctx);
2240 else
2242 location_t loc = gimple_location (ctx->stmt);
2243 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2244 /* Move VLA fields to the end. */
2245 p = &TYPE_FIELDS (ctx->record_type);
2246 while (*p)
2247 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2248 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2250 *q = *p;
2251 *p = TREE_CHAIN (*p);
2252 TREE_CHAIN (*q) = NULL_TREE;
2253 q = &TREE_CHAIN (*q);
2255 else
2256 p = &DECL_CHAIN (*p);
2257 *p = vla_fields;
2258 layout_type (ctx->record_type);
2259 fixup_child_record_type (ctx);
2260 if (ctx->srecord_type)
2261 layout_type (ctx->srecord_type);
2262 tree t = fold_convert_loc (loc, long_integer_type_node,
2263 TYPE_SIZE_UNIT (ctx->record_type));
2264 gimple_omp_task_set_arg_size (ctx->stmt, t);
2265 t = build_int_cst (long_integer_type_node,
2266 TYPE_ALIGN_UNIT (ctx->record_type));
2267 gimple_omp_task_set_arg_align (ctx->stmt, t);
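/* E.g., a task record declared with fields { char vla[n]; int i; } is
   reordered above to { int i; char vla[n]; }: every field whose size is
   not a compile-time constant is moved to the end before the record is
   laid out and its size and alignment are handed to the runtime via
   gimple_omp_task_set_arg_size and gimple_omp_task_set_arg_align.  */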
2272 /* Scan an OpenMP loop directive. */
2274 static void
2275 scan_omp_for (gimple_omp_for stmt, omp_context *outer_ctx)
2277 omp_context *ctx;
2278 size_t i;
2280 ctx = new_omp_context (stmt, outer_ctx);
2282 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2284 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2285 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2287 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2288 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2289 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2290 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2292 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2295 /* Scan an OpenMP sections directive. */
2297 static void
2298 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
2300 omp_context *ctx;
2302 ctx = new_omp_context (stmt, outer_ctx);
2303 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2304 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2307 /* Scan an OpenMP single directive. */
2309 static void
2310 scan_omp_single (gimple stmt, omp_context *outer_ctx)
2312 omp_context *ctx;
2313 tree name;
2315 ctx = new_omp_context (stmt, outer_ctx);
2316 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2317 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2318 name = create_tmp_var_name (".omp_copy_s");
2319 name = build_decl (gimple_location (stmt),
2320 TYPE_DECL, name, ctx->record_type);
2321 TYPE_NAME (ctx->record_type) = name;
2323 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2324 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2326 if (TYPE_FIELDS (ctx->record_type) == NULL)
2327 ctx->record_type = NULL;
2328 else
2329 layout_type (ctx->record_type);
2332 /* Scan an OpenMP target{, data, update} directive. */
2334 static void
2335 scan_omp_target (gimple stmt, omp_context *outer_ctx)
2337 omp_context *ctx;
2338 tree name;
2339 int kind = gimple_omp_target_kind (stmt);
2341 ctx = new_omp_context (stmt, outer_ctx);
2342 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2343 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2344 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2345 name = create_tmp_var_name (".omp_data_t");
2346 name = build_decl (gimple_location (stmt),
2347 TYPE_DECL, name, ctx->record_type);
2348 DECL_ARTIFICIAL (name) = 1;
2349 DECL_NAMELESS (name) = 1;
2350 TYPE_NAME (ctx->record_type) = name;
2351 if (kind == GF_OMP_TARGET_KIND_REGION)
2353 create_omp_child_function (ctx, false);
2354 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2357 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2358 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2360 if (TYPE_FIELDS (ctx->record_type) == NULL)
2361 ctx->record_type = ctx->receiver_decl = NULL;
2362 else
2364 TYPE_FIELDS (ctx->record_type)
2365 = nreverse (TYPE_FIELDS (ctx->record_type));
2366 #ifdef ENABLE_CHECKING
2367 tree field;
2368 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2369 for (field = TYPE_FIELDS (ctx->record_type);
2370 field;
2371 field = DECL_CHAIN (field))
2372 gcc_assert (DECL_ALIGN (field) == align);
2373 #endif
2374 layout_type (ctx->record_type);
2375 if (kind == GF_OMP_TARGET_KIND_REGION)
2376 fixup_child_record_type (ctx);
2380 /* Scan an OpenMP teams directive. */
2382 static void
2383 scan_omp_teams (gimple stmt, omp_context *outer_ctx)
2385 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2386 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2387 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2390 /* Check OpenMP nesting restrictions. */
2391 static bool
2392 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2394 if (ctx != NULL)
2396 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2397 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2399 error_at (gimple_location (stmt),
2400 "OpenMP constructs may not be nested inside simd region");
2401 return false;
2403 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2405 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2406 || (gimple_omp_for_kind (stmt)
2407 != GF_OMP_FOR_KIND_DISTRIBUTE))
2408 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2410 error_at (gimple_location (stmt),
2411 "only distribute or parallel constructs are allowed to "
2412 "be closely nested inside teams construct");
2413 return false;
2417 switch (gimple_code (stmt))
2419 case GIMPLE_OMP_FOR:
2420 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2421 return true;
2422 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2424 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2426 error_at (gimple_location (stmt),
2427 "distribute construct must be closely nested inside "
2428 "teams construct");
2429 return false;
2431 return true;
2433 /* FALLTHRU */
2434 case GIMPLE_CALL:
2435 if (is_gimple_call (stmt)
2436 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2437 == BUILT_IN_GOMP_CANCEL
2438 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2439 == BUILT_IN_GOMP_CANCELLATION_POINT))
2441 const char *bad = NULL;
2442 const char *kind = NULL;
2443 if (ctx == NULL)
2445 error_at (gimple_location (stmt), "orphaned %qs construct",
2446 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2447 == BUILT_IN_GOMP_CANCEL
2448 ? "#pragma omp cancel"
2449 : "#pragma omp cancellation point");
2450 return false;
2452 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2453 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2454 : 0)
2456 case 1:
2457 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2458 bad = "#pragma omp parallel";
2459 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2460 == BUILT_IN_GOMP_CANCEL
2461 && !integer_zerop (gimple_call_arg (stmt, 1)))
2462 ctx->cancellable = true;
2463 kind = "parallel";
2464 break;
2465 case 2:
2466 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2467 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2468 bad = "#pragma omp for";
2469 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2470 == BUILT_IN_GOMP_CANCEL
2471 && !integer_zerop (gimple_call_arg (stmt, 1)))
2473 ctx->cancellable = true;
2474 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2475 OMP_CLAUSE_NOWAIT))
2476 warning_at (gimple_location (stmt), 0,
2477 "%<#pragma omp cancel for%> inside "
2478 "%<nowait%> for construct");
2479 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2480 OMP_CLAUSE_ORDERED))
2481 warning_at (gimple_location (stmt), 0,
2482 "%<#pragma omp cancel for%> inside "
2483 "%<ordered%> for construct");
2485 kind = "for";
2486 break;
2487 case 4:
2488 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2489 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2490 bad = "#pragma omp sections";
2491 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2492 == BUILT_IN_GOMP_CANCEL
2493 && !integer_zerop (gimple_call_arg (stmt, 1)))
2495 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2497 ctx->cancellable = true;
2498 if (find_omp_clause (gimple_omp_sections_clauses
2499 (ctx->stmt),
2500 OMP_CLAUSE_NOWAIT))
2501 warning_at (gimple_location (stmt), 0,
2502 "%<#pragma omp cancel sections%> inside "
2503 "%<nowait%> sections construct");
2505 else
2507 gcc_assert (ctx->outer
2508 && gimple_code (ctx->outer->stmt)
2509 == GIMPLE_OMP_SECTIONS);
2510 ctx->outer->cancellable = true;
2511 if (find_omp_clause (gimple_omp_sections_clauses
2512 (ctx->outer->stmt),
2513 OMP_CLAUSE_NOWAIT))
2514 warning_at (gimple_location (stmt), 0,
2515 "%<#pragma omp cancel sections%> inside "
2516 "%<nowait%> sections construct");
2519 kind = "sections";
2520 break;
2521 case 8:
2522 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2523 bad = "#pragma omp task";
2524 else
2525 ctx->cancellable = true;
2526 kind = "taskgroup";
2527 break;
2528 default:
2529 error_at (gimple_location (stmt), "invalid arguments");
2530 return false;
2532 if (bad)
2534 error_at (gimple_location (stmt),
2535 "%<%s %s%> construct not closely nested inside of %qs",
2536 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2537 == BUILT_IN_GOMP_CANCEL
2538 ? "#pragma omp cancel"
2539 : "#pragma omp cancellation point", kind, bad);
2540 return false;
2543 /* FALLTHRU */
2544 case GIMPLE_OMP_SECTIONS:
2545 case GIMPLE_OMP_SINGLE:
2546 for (; ctx != NULL; ctx = ctx->outer)
2547 switch (gimple_code (ctx->stmt))
2549 case GIMPLE_OMP_FOR:
2550 case GIMPLE_OMP_SECTIONS:
2551 case GIMPLE_OMP_SINGLE:
2552 case GIMPLE_OMP_ORDERED:
2553 case GIMPLE_OMP_MASTER:
2554 case GIMPLE_OMP_TASK:
2555 case GIMPLE_OMP_CRITICAL:
2556 if (is_gimple_call (stmt))
2558 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2559 != BUILT_IN_GOMP_BARRIER)
2560 return true;
2561 error_at (gimple_location (stmt),
2562 "barrier region may not be closely nested inside "
2563 "of work-sharing, critical, ordered, master or "
2564 "explicit task region");
2565 return false;
2567 error_at (gimple_location (stmt),
2568 "work-sharing region may not be closely nested inside "
2569 "of work-sharing, critical, ordered, master or explicit "
2570 "task region");
2571 return false;
2572 case GIMPLE_OMP_PARALLEL:
2573 return true;
2574 default:
2575 break;
2577 break;
2578 case GIMPLE_OMP_MASTER:
2579 for (; ctx != NULL; ctx = ctx->outer)
2580 switch (gimple_code (ctx->stmt))
2582 case GIMPLE_OMP_FOR:
2583 case GIMPLE_OMP_SECTIONS:
2584 case GIMPLE_OMP_SINGLE:
2585 case GIMPLE_OMP_TASK:
2586 error_at (gimple_location (stmt),
2587 "master region may not be closely nested inside "
2588 "of work-sharing or explicit task region");
2589 return false;
2590 case GIMPLE_OMP_PARALLEL:
2591 return true;
2592 default:
2593 break;
2595 break;
2596 case GIMPLE_OMP_ORDERED:
2597 for (; ctx != NULL; ctx = ctx->outer)
2598 switch (gimple_code (ctx->stmt))
2600 case GIMPLE_OMP_CRITICAL:
2601 case GIMPLE_OMP_TASK:
2602 error_at (gimple_location (stmt),
2603 "ordered region may not be closely nested inside "
2604 "of critical or explicit task region");
2605 return false;
2606 case GIMPLE_OMP_FOR:
2607 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2608 OMP_CLAUSE_ORDERED) == NULL)
2610 error_at (gimple_location (stmt),
2611 "ordered region must be closely nested inside "
2612 "a loop region with an ordered clause");
2613 return false;
2615 return true;
2616 case GIMPLE_OMP_PARALLEL:
2617 error_at (gimple_location (stmt),
2618 "ordered region must be closely nested inside "
2619 "a loop region with an ordered clause");
2620 return false;
2621 default:
2622 break;
2624 break;
2625 case GIMPLE_OMP_CRITICAL:
2627 tree this_stmt_name =
2628 gimple_omp_critical_name (as_a <gimple_omp_critical> (stmt));
2629 for (; ctx != NULL; ctx = ctx->outer)
2630 if (gimple_omp_critical other_crit =
2631 dyn_cast <gimple_omp_critical> (ctx->stmt))
2632 if (this_stmt_name == gimple_omp_critical_name (other_crit))
2634 error_at (gimple_location (stmt),
2635 "critical region may not be nested inside a critical "
2636 "region with the same name");
2637 return false;
2640 break;
2641 case GIMPLE_OMP_TEAMS:
2642 if (ctx == NULL
2643 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2644 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2646 error_at (gimple_location (stmt),
2647 "teams construct not closely nested inside of target "
2648 "region");
2649 return false;
2651 break;
2652 case GIMPLE_OMP_TARGET:
2653 for (; ctx != NULL; ctx = ctx->outer)
2654 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
2655 && gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_REGION)
2657 const char *name;
2658 switch (gimple_omp_target_kind (stmt))
2660 case GF_OMP_TARGET_KIND_REGION: name = "target"; break;
2661 case GF_OMP_TARGET_KIND_DATA: name = "target data"; break;
2662 case GF_OMP_TARGET_KIND_UPDATE: name = "target update"; break;
2663 default: gcc_unreachable ();
2665 warning_at (gimple_location (stmt), 0,
2666 "%s construct inside of target region", name);
2668 break;
2669 default:
2670 break;
2672 return true;
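/* Two notes on the checks above.  The first argument of GOMP_cancel and
   GOMP_cancellation_point encodes the cancelled construct as a bit,
   matching the cases handled above: 1 parallel, 2 for, 4 sections,
   8 taskgroup.  And as a worked example of a rejected nesting:

     #pragma omp single
       {
       #pragma omp barrier
       }

   the barrier is already a GOMP_barrier call at this point, so it is
   checked in a context whose statement is GIMPLE_OMP_SINGLE and
   triggers the "barrier region may not be closely nested inside of
   work-sharing, critical, ordered, master or explicit task region"
   error above.  */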
2676 /* Helper function for scan_omp.
2678    Callback for walk_tree, and for operands via walk_gimple_stmt, used
2679    to scan for OpenMP directives in TP.  */
2681 static tree
2682 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2684 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2685 omp_context *ctx = (omp_context *) wi->info;
2686 tree t = *tp;
2688 switch (TREE_CODE (t))
2690 case VAR_DECL:
2691 case PARM_DECL:
2692 case LABEL_DECL:
2693 case RESULT_DECL:
2694 if (ctx)
2695 *tp = remap_decl (t, &ctx->cb);
2696 break;
2698 default:
2699 if (ctx && TYPE_P (t))
2700 *tp = remap_type (t, &ctx->cb);
2701 else if (!DECL_P (t))
2703 *walk_subtrees = 1;
2704 if (ctx)
2706 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2707 if (tem != TREE_TYPE (t))
2709 if (TREE_CODE (t) == INTEGER_CST)
2710 *tp = wide_int_to_tree (tem, t);
2711 else
2712 TREE_TYPE (t) = tem;
2716 break;
2719 return NULL_TREE;
2722 /* Return true if FNDECL is a setjmp or a longjmp. */
2724 static bool
2725 setjmp_or_longjmp_p (const_tree fndecl)
2727 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2728 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2729 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2730 return true;
2732 tree declname = DECL_NAME (fndecl);
2733 if (!declname)
2734 return false;
2735 const char *name = IDENTIFIER_POINTER (declname);
2736 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2740 /* Helper function for scan_omp.
2742 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2743 the current statement in GSI. */
2745 static tree
2746 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2747 struct walk_stmt_info *wi)
2749 gimple stmt = gsi_stmt (*gsi);
2750 omp_context *ctx = (omp_context *) wi->info;
2752 if (gimple_has_location (stmt))
2753 input_location = gimple_location (stmt);
2755 /* Check the OpenMP nesting restrictions. */
2756 bool remove = false;
2757 if (is_gimple_omp (stmt))
2758 remove = !check_omp_nesting_restrictions (stmt, ctx);
2759 else if (is_gimple_call (stmt))
2761 tree fndecl = gimple_call_fndecl (stmt);
2762 if (fndecl)
2764 if (setjmp_or_longjmp_p (fndecl)
2765 && ctx
2766 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2767 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2769 remove = true;
2770 error_at (gimple_location (stmt),
2771 "setjmp/longjmp inside simd construct");
2773 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2774 switch (DECL_FUNCTION_CODE (fndecl))
2776 case BUILT_IN_GOMP_BARRIER:
2777 case BUILT_IN_GOMP_CANCEL:
2778 case BUILT_IN_GOMP_CANCELLATION_POINT:
2779 case BUILT_IN_GOMP_TASKYIELD:
2780 case BUILT_IN_GOMP_TASKWAIT:
2781 case BUILT_IN_GOMP_TASKGROUP_START:
2782 case BUILT_IN_GOMP_TASKGROUP_END:
2783 remove = !check_omp_nesting_restrictions (stmt, ctx);
2784 break;
2785 default:
2786 break;
2790 if (remove)
2792 stmt = gimple_build_nop ();
2793 gsi_replace (gsi, stmt, false);
2796 *handled_ops_p = true;
2798 switch (gimple_code (stmt))
2800 case GIMPLE_OMP_PARALLEL:
2801 taskreg_nesting_level++;
2802 scan_omp_parallel (gsi, ctx);
2803 taskreg_nesting_level--;
2804 break;
2806 case GIMPLE_OMP_TASK:
2807 taskreg_nesting_level++;
2808 scan_omp_task (gsi, ctx);
2809 taskreg_nesting_level--;
2810 break;
2812 case GIMPLE_OMP_FOR:
2813 scan_omp_for (as_a <gimple_omp_for> (stmt), ctx);
2814 break;
2816 case GIMPLE_OMP_SECTIONS:
2817 scan_omp_sections (stmt, ctx);
2818 break;
2820 case GIMPLE_OMP_SINGLE:
2821 scan_omp_single (stmt, ctx);
2822 break;
2824 case GIMPLE_OMP_SECTION:
2825 case GIMPLE_OMP_MASTER:
2826 case GIMPLE_OMP_TASKGROUP:
2827 case GIMPLE_OMP_ORDERED:
2828 case GIMPLE_OMP_CRITICAL:
2829 ctx = new_omp_context (stmt, ctx);
2830 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2831 break;
2833 case GIMPLE_OMP_TARGET:
2834 scan_omp_target (stmt, ctx);
2835 break;
2837 case GIMPLE_OMP_TEAMS:
2838 scan_omp_teams (stmt, ctx);
2839 break;
2841 case GIMPLE_BIND:
2843 tree var;
2845 *handled_ops_p = false;
2846 if (ctx)
2847 for (var = gimple_bind_vars (as_a <gimple_bind> (stmt));
2848 var ;
2849 var = DECL_CHAIN (var))
2850 insert_decl_map (&ctx->cb, var, var);
2852 break;
2853 default:
2854 *handled_ops_p = false;
2855 break;
2858 return NULL_TREE;
2862 /* Scan all the statements starting at the current statement. CTX
2863 contains context information about the OpenMP directives and
2864 clauses found during the scan. */
2866 static void
2867 scan_omp (gimple_seq *body_p, omp_context *ctx)
2869 location_t saved_location;
2870 struct walk_stmt_info wi;
2872 memset (&wi, 0, sizeof (wi));
2873 wi.info = ctx;
2874 wi.want_locations = true;
2876 saved_location = input_location;
2877 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2878 input_location = saved_location;
2881 /* Re-gimplification and code generation routines. */
2883 /* Build a call to GOMP_barrier. */
2885 static gimple
2886 build_omp_barrier (tree lhs)
2888 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2889 : BUILT_IN_GOMP_BARRIER);
2890 gimple_call g = gimple_build_call (fndecl, 0);
2891 if (lhs)
2892 gimple_call_set_lhs (g, lhs);
2893 return g;
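/* I.e., build_omp_barrier (NULL_TREE) produces the call
   GOMP_barrier (), while a non-NULL LHS produces
   lhs = GOMP_barrier_cancel () so the caller can test for
   cancellation.  */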
2896 /* If a context was created for STMT when it was scanned, return it. */
2898 static omp_context *
2899 maybe_lookup_ctx (gimple stmt)
2901 splay_tree_node n;
2902 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2903 return n ? (omp_context *) n->value : NULL;
2907 /* Find the mapping for DECL in CTX or the immediately enclosing
2908 context that has a mapping for DECL.
2910 If CTX is a nested parallel directive, we may have to use the decl
2911 mappings created in CTX's parent context. Suppose that we have the
2912    following parallel nesting (variable UIDs shown for clarity):
2914 iD.1562 = 0;
2915 #omp parallel shared(iD.1562) -> outer parallel
2916 iD.1562 = iD.1562 + 1;
2918 #omp parallel shared (iD.1562) -> inner parallel
2919 iD.1562 = iD.1562 - 1;
2921 Each parallel structure will create a distinct .omp_data_s structure
2922 for copying iD.1562 in/out of the directive:
2924 outer parallel .omp_data_s.1.i -> iD.1562
2925 inner parallel .omp_data_s.2.i -> iD.1562
2927 A shared variable mapping will produce a copy-out operation before
2928 the parallel directive and a copy-in operation after it. So, in
2929 this case we would have:
2931 iD.1562 = 0;
2932 .omp_data_o.1.i = iD.1562;
2933 #omp parallel shared(iD.1562) -> outer parallel
2934 .omp_data_i.1 = &.omp_data_o.1
2935 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2937 .omp_data_o.2.i = iD.1562; -> **
2938 #omp parallel shared(iD.1562) -> inner parallel
2939 .omp_data_i.2 = &.omp_data_o.2
2940 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2943 ** This is a problem. The symbol iD.1562 cannot be referenced
2944 inside the body of the outer parallel region. But since we are
2945 emitting this copy operation while expanding the inner parallel
2946 directive, we need to access the CTX structure of the outer
2947 parallel directive to get the correct mapping:
2949 .omp_data_o.2.i = .omp_data_i.1->i
2951 Since there may be other workshare or parallel directives enclosing
2952 the parallel directive, it may be necessary to walk up the context
2953 parent chain. This is not a problem in general because nested
2954 parallelism happens only rarely. */
2956 static tree
2957 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2959 tree t;
2960 omp_context *up;
2962 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2963 t = maybe_lookup_decl (decl, up);
2965 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2967 return t ? t : decl;
2971 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2972 in outer contexts. */
2974 static tree
2975 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2977 tree t = NULL;
2978 omp_context *up;
2980 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2981 t = maybe_lookup_decl (decl, up);
2983 return t ? t : decl;
2987 /* Construct the initialization value for reduction CLAUSE. */
2989 tree
2990 omp_reduction_init (tree clause, tree type)
2992 location_t loc = OMP_CLAUSE_LOCATION (clause);
2993 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2995 case PLUS_EXPR:
2996 case MINUS_EXPR:
2997 case BIT_IOR_EXPR:
2998 case BIT_XOR_EXPR:
2999 case TRUTH_OR_EXPR:
3000 case TRUTH_ORIF_EXPR:
3001 case TRUTH_XOR_EXPR:
3002 case NE_EXPR:
3003 return build_zero_cst (type);
3005 case MULT_EXPR:
3006 case TRUTH_AND_EXPR:
3007 case TRUTH_ANDIF_EXPR:
3008 case EQ_EXPR:
3009 return fold_convert_loc (loc, type, integer_one_node);
3011 case BIT_AND_EXPR:
3012 return fold_convert_loc (loc, type, integer_minus_one_node);
3014 case MAX_EXPR:
3015 if (SCALAR_FLOAT_TYPE_P (type))
3017 REAL_VALUE_TYPE max, min;
3018 if (HONOR_INFINITIES (TYPE_MODE (type)))
3020 real_inf (&max);
3021 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3023 else
3024 real_maxval (&min, 1, TYPE_MODE (type));
3025 return build_real (type, min);
3027 else
3029 gcc_assert (INTEGRAL_TYPE_P (type));
3030 return TYPE_MIN_VALUE (type);
3033 case MIN_EXPR:
3034 if (SCALAR_FLOAT_TYPE_P (type))
3036 REAL_VALUE_TYPE max;
3037 if (HONOR_INFINITIES (TYPE_MODE (type)))
3038 real_inf (&max);
3039 else
3040 real_maxval (&max, 0, TYPE_MODE (type));
3041 return build_real (type, max);
3043 else
3045 gcc_assert (INTEGRAL_TYPE_P (type));
3046 return TYPE_MAX_VALUE (type);
3049 default:
3050 gcc_unreachable ();
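/* For example: reduction(+:x), reduction(|:x) and reduction(^:x)
   privatize X initialized to 0; reduction(*:x) and reduction(&&:x)
   initialize it to 1; reduction(&:x) to ~0; and reduction(max:f) on a
   float initializes F to -inf when infinities are honored, otherwise to
   the most negative finite value.  */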
3054 /* Return alignment to be assumed for var in CLAUSE, which should be
3055 OMP_CLAUSE_ALIGNED. */
3057 static tree
3058 omp_clause_aligned_alignment (tree clause)
3060 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3061 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3063   /* Otherwise return the implementation-defined alignment.  */
3064 unsigned int al = 1;
3065 enum machine_mode mode, vmode;
3066 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3067 if (vs)
3068 vs = 1 << floor_log2 (vs);
3069 static enum mode_class classes[]
3070 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3071 for (int i = 0; i < 4; i += 2)
3072 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3073 mode != VOIDmode;
3074 mode = GET_MODE_WIDER_MODE (mode))
3076 vmode = targetm.vectorize.preferred_simd_mode (mode);
3077 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3078 continue;
3079 while (vs
3080 && GET_MODE_SIZE (vmode) < vs
3081 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3082 vmode = GET_MODE_2XWIDER_MODE (vmode);
3084 tree type = lang_hooks.types.type_for_mode (mode, 1);
3085 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3086 continue;
3087 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3088 / GET_MODE_SIZE (mode));
3089 if (TYPE_MODE (type) != vmode)
3090 continue;
3091 if (TYPE_ALIGN_UNIT (type) > al)
3092 al = TYPE_ALIGN_UNIT (type);
3094 return build_int_cst (integer_type_node, al);
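/* A sketch of the effect, assuming a target whose widest supported
   autovectorization size is 32 bytes: aligned(p) with no explicit
   alignment expression then yields 32, i.e. the unit alignment of the
   widest vector type found by the loop above.  */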
3097 /* Return maximum possible vectorization factor for the target. */
3099 static int
3100 omp_max_vf (void)
3102 if (!optimize
3103 || optimize_debug
3104 || !flag_tree_loop_optimize
3105 || (!flag_tree_loop_vectorize
3106 && (global_options_set.x_flag_tree_loop_vectorize
3107 || global_options_set.x_flag_tree_vectorize)))
3108 return 1;
3110 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3111 if (vs)
3113 vs = 1 << floor_log2 (vs);
3114 return vs;
3116 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3117 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3118 return GET_MODE_NUNITS (vqimode);
3119 return 1;
3122 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3123 privatization. */
3125 static bool
3126 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3127 tree &idx, tree &lane, tree &ivar, tree &lvar)
3129 if (max_vf == 0)
3131 max_vf = omp_max_vf ();
3132 if (max_vf > 1)
3134 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3135 OMP_CLAUSE_SAFELEN);
3136 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3137 max_vf = 1;
3138 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3139 max_vf) == -1)
3140 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3142 if (max_vf > 1)
3144 idx = create_tmp_var (unsigned_type_node, NULL);
3145 lane = create_tmp_var (unsigned_type_node, NULL);
3148 if (max_vf == 1)
3149 return false;
3151 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3152 tree avar = create_tmp_var_raw (atype, NULL);
3153 if (TREE_ADDRESSABLE (new_var))
3154 TREE_ADDRESSABLE (avar) = 1;
3155 DECL_ATTRIBUTES (avar)
3156 = tree_cons (get_identifier ("omp simd array"), NULL,
3157 DECL_ATTRIBUTES (avar));
3158 gimple_add_tmp_var (avar);
3159 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3160 NULL_TREE, NULL_TREE);
3161 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3162 NULL_TREE, NULL_TREE);
3163 if (DECL_P (new_var))
3165 SET_DECL_VALUE_EXPR (new_var, lvar);
3166 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3168 return true;
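/* In effect, a sketch (D.simdarr stands for the temporary AVAR created
   above, marked with the "omp simd array" attribute): a privatized

     float x;

   gets a backing array

     float D.simdarr[max_vf];

   with IVAR = D.simdarr[idx] used by the per-lane init/dtor sequences
   and LVAR = D.simdarr[lane] installed as X's DECL_VALUE_EXPR.  */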
3171 /* Helper function of lower_rec_input_clauses.  For a reference
3172    in a simd reduction, create the underlying variable it will reference.  */
3174 static void
3175 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3177 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3178 if (TREE_CONSTANT (z))
3180 const char *name = NULL;
3181 if (DECL_NAME (new_vard))
3182 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3184 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3185 gimple_add_tmp_var (z);
3186 TREE_ADDRESSABLE (z) = 1;
3187 z = build_fold_addr_expr_loc (loc, z);
3188 gimplify_assign (new_vard, z, ilist);
3192 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3193 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3194 private variables. Initialization statements go in ILIST, while calls
3195 to destructors go in DLIST. */
3197 static void
3198 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3199 omp_context *ctx, struct omp_for_data *fd)
3201 tree c, dtor, copyin_seq, x, ptr;
3202 bool copyin_by_ref = false;
3203 bool lastprivate_firstprivate = false;
3204 bool reduction_omp_orig_ref = false;
3205 int pass;
3206 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3207 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3208 int max_vf = 0;
3209 tree lane = NULL_TREE, idx = NULL_TREE;
3210 tree ivar = NULL_TREE, lvar = NULL_TREE;
3211 gimple_seq llist[2] = { NULL, NULL };
3213 copyin_seq = NULL;
3215 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3216 with data sharing clauses referencing variable sized vars. That
3217 is unnecessarily hard to support and very unlikely to result in
3218 vectorized code anyway. */
3219 if (is_simd)
3220 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3221 switch (OMP_CLAUSE_CODE (c))
3223 case OMP_CLAUSE_LINEAR:
3224 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3225 max_vf = 1;
3226 /* FALLTHRU */
3227 case OMP_CLAUSE_REDUCTION:
3228 case OMP_CLAUSE_PRIVATE:
3229 case OMP_CLAUSE_FIRSTPRIVATE:
3230 case OMP_CLAUSE_LASTPRIVATE:
3231 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3232 max_vf = 1;
3233 break;
3234 default:
3235 continue;
3238 /* Do all the fixed sized types in the first pass, and the variable sized
3239 types in the second pass. This makes sure that the scalar arguments to
3240 the variable sized types are processed before we use them in the
3241 variable sized operations. */
3242 for (pass = 0; pass < 2; ++pass)
3244 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3246 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3247 tree var, new_var;
3248 bool by_ref;
3249 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3251 switch (c_kind)
3253 case OMP_CLAUSE_PRIVATE:
3254 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3255 continue;
3256 break;
3257 case OMP_CLAUSE_SHARED:
3258 /* Ignore shared directives in teams construct. */
3259 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3260 continue;
3261 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3263 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3264 continue;
3266 case OMP_CLAUSE_FIRSTPRIVATE:
3267 case OMP_CLAUSE_COPYIN:
3268 case OMP_CLAUSE_LINEAR:
3269 break;
3270 case OMP_CLAUSE_REDUCTION:
3271 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3272 reduction_omp_orig_ref = true;
3273 break;
3274 case OMP_CLAUSE__LOOPTEMP_:
3275 /* Handle _looptemp_ clauses only on parallel. */
3276 if (fd)
3277 continue;
3278 break;
3279 case OMP_CLAUSE_LASTPRIVATE:
3280 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3282 lastprivate_firstprivate = true;
3283 if (pass != 0)
3284 continue;
3286	  /* Even without a corresponding firstprivate, if the
3287	     decl is a Fortran allocatable, it needs an outer var
3288	     reference.  */
3289 else if (pass == 0
3290 && lang_hooks.decls.omp_private_outer_ref
3291 (OMP_CLAUSE_DECL (c)))
3292 lastprivate_firstprivate = true;
3293 break;
3294 case OMP_CLAUSE_ALIGNED:
3295 if (pass == 0)
3296 continue;
3297 var = OMP_CLAUSE_DECL (c);
3298 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3299 && !is_global_var (var))
3301 new_var = maybe_lookup_decl (var, ctx);
3302 if (new_var == NULL_TREE)
3303 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3304 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3305 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3306 omp_clause_aligned_alignment (c));
3307 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3308 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3309 gimplify_and_add (x, ilist);
3311 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3312 && is_global_var (var))
3314 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3315 new_var = lookup_decl (var, ctx);
3316 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3317 t = build_fold_addr_expr_loc (clause_loc, t);
3318 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3319 t = build_call_expr_loc (clause_loc, t2, 2, t,
3320 omp_clause_aligned_alignment (c));
3321 t = fold_convert_loc (clause_loc, ptype, t);
3322 x = create_tmp_var (ptype, NULL);
3323 t = build2 (MODIFY_EXPR, ptype, x, t);
3324 gimplify_and_add (t, ilist);
3325 t = build_simple_mem_ref_loc (clause_loc, x);
3326 SET_DECL_VALUE_EXPR (new_var, t);
3327 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3329 continue;
3330 default:
3331 continue;
3334 new_var = var = OMP_CLAUSE_DECL (c);
3335 if (c_kind != OMP_CLAUSE_COPYIN)
3336 new_var = lookup_decl (var, ctx);
3338 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3340 if (pass != 0)
3341 continue;
3343 else if (is_variable_sized (var))
3345 /* For variable sized types, we need to allocate the
3346 actual storage here. Call alloca and store the
3347 result in the pointer decl that we created elsewhere. */
3348 if (pass == 0)
3349 continue;
3351 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3353 gimple_call stmt;
3354 tree tmp, atmp;
3356 ptr = DECL_VALUE_EXPR (new_var);
3357 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3358 ptr = TREE_OPERAND (ptr, 0);
3359 gcc_assert (DECL_P (ptr));
3360 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3362		  /* void *tmp = __builtin_alloca (size);  */
3363 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3364 stmt = gimple_build_call (atmp, 1, x);
3365 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3366 gimple_add_tmp_var (tmp);
3367 gimple_call_set_lhs (stmt, tmp);
3369 gimple_seq_add_stmt (ilist, stmt);
3371 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3372 gimplify_assign (ptr, x, ilist);
3375 else if (is_reference (var))
3377 /* For references that are being privatized for Fortran,
3378 allocate new backing storage for the new pointer
3379 variable. This allows us to avoid changing all the
3380 code that expects a pointer to something that expects
3381 a direct variable. */
3382 if (pass == 0)
3383 continue;
3385 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3386 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3388 x = build_receiver_ref (var, false, ctx);
3389 x = build_fold_addr_expr_loc (clause_loc, x);
3391 else if (TREE_CONSTANT (x))
3393		  /* For a reduction in a SIMD loop, defer adding the
3394		     initialization of the reference, because if we decide
3395		     to use a SIMD array for it, the initialization could
3396		     cause an expansion ICE.  */
3397 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3398 x = NULL_TREE;
3399 else
3401 const char *name = NULL;
3402 if (DECL_NAME (var))
3403 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3405 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3406 name);
3407 gimple_add_tmp_var (x);
3408 TREE_ADDRESSABLE (x) = 1;
3409 x = build_fold_addr_expr_loc (clause_loc, x);
3412 else
3414 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3415 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3418 if (x)
3420 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3421 gimplify_assign (new_var, x, ilist);
3424 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3426 else if (c_kind == OMP_CLAUSE_REDUCTION
3427 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3429 if (pass == 0)
3430 continue;
3432 else if (pass != 0)
3433 continue;
3435 switch (OMP_CLAUSE_CODE (c))
3437 case OMP_CLAUSE_SHARED:
3438 /* Ignore shared directives in teams construct. */
3439 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3440 continue;
3441 /* Shared global vars are just accessed directly. */
3442 if (is_global_var (new_var))
3443 break;
3444 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3445 needs to be delayed until after fixup_child_record_type so
3446 that we get the correct type during the dereference. */
3447 by_ref = use_pointer_for_field (var, ctx);
3448 x = build_receiver_ref (var, by_ref, ctx);
3449 SET_DECL_VALUE_EXPR (new_var, x);
3450 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3452 /* ??? If VAR is not passed by reference, and the variable
3453 hasn't been initialized yet, then we'll get a warning for
3454 the store into the omp_data_s structure. Ideally, we'd be
3455 able to notice this and not store anything at all, but
3456 we're generating code too early. Suppress the warning. */
3457 if (!by_ref)
3458 TREE_NO_WARNING (var) = 1;
3459 break;
3461 case OMP_CLAUSE_LASTPRIVATE:
3462 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3463 break;
3464 /* FALLTHRU */
3466 case OMP_CLAUSE_PRIVATE:
3467 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3468 x = build_outer_var_ref (var, ctx);
3469 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3471 if (is_task_ctx (ctx))
3472 x = build_receiver_ref (var, false, ctx);
3473 else
3474 x = build_outer_var_ref (var, ctx);
3476 else
3477 x = NULL;
3478 do_private:
3479 tree nx;
3480 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3481 if (is_simd)
3483 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3484 if ((TREE_ADDRESSABLE (new_var) || nx || y
3485 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3486 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3487 idx, lane, ivar, lvar))
3489 if (nx)
3490 x = lang_hooks.decls.omp_clause_default_ctor
3491 (c, unshare_expr (ivar), x);
3492 if (nx && x)
3493 gimplify_and_add (x, &llist[0]);
3494 if (y)
3496 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3497 if (y)
3499 gimple_seq tseq = NULL;
3501 dtor = y;
3502 gimplify_stmt (&dtor, &tseq);
3503 gimple_seq_add_seq (&llist[1], tseq);
3506 break;
3509 if (nx)
3510 gimplify_and_add (nx, ilist);
3511 /* FALLTHRU */
3513 do_dtor:
3514 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3515 if (x)
3517 gimple_seq tseq = NULL;
3519 dtor = x;
3520 gimplify_stmt (&dtor, &tseq);
3521 gimple_seq_add_seq (dlist, tseq);
3523 break;
3525 case OMP_CLAUSE_LINEAR:
3526 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3527 goto do_firstprivate;
3528 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3529 x = NULL;
3530 else
3531 x = build_outer_var_ref (var, ctx);
3532 goto do_private;
3534 case OMP_CLAUSE_FIRSTPRIVATE:
3535 if (is_task_ctx (ctx))
3537 if (is_reference (var) || is_variable_sized (var))
3538 goto do_dtor;
3539 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3540 ctx))
3541 || use_pointer_for_field (var, NULL))
3543 x = build_receiver_ref (var, false, ctx);
3544 SET_DECL_VALUE_EXPR (new_var, x);
3545 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3546 goto do_dtor;
3549 do_firstprivate:
3550 x = build_outer_var_ref (var, ctx);
3551 if (is_simd)
3553 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3554 && gimple_omp_for_combined_into_p (ctx->stmt))
3556 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3557 tree stept = TREE_TYPE (t);
3558 tree ct = find_omp_clause (clauses,
3559 OMP_CLAUSE__LOOPTEMP_);
3560 gcc_assert (ct);
3561 tree l = OMP_CLAUSE_DECL (ct);
3562 tree n1 = fd->loop.n1;
3563 tree step = fd->loop.step;
3564 tree itype = TREE_TYPE (l);
3565 if (POINTER_TYPE_P (itype))
3566 itype = signed_type_for (itype);
3567 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3568 if (TYPE_UNSIGNED (itype)
3569 && fd->loop.cond_code == GT_EXPR)
3570 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3571 fold_build1 (NEGATE_EXPR, itype, l),
3572 fold_build1 (NEGATE_EXPR,
3573 itype, step));
3574 else
3575 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3576 t = fold_build2 (MULT_EXPR, stept,
3577 fold_convert (stept, l), t);
3579 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3581 x = lang_hooks.decls.omp_clause_linear_ctor
3582 (c, new_var, x, t);
3583 gimplify_and_add (x, ilist);
3584 goto do_dtor;
3587 if (POINTER_TYPE_P (TREE_TYPE (x)))
3588 x = fold_build2 (POINTER_PLUS_EXPR,
3589 TREE_TYPE (x), x, t);
3590 else
3591 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3594 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3595 || TREE_ADDRESSABLE (new_var))
3596 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3597 idx, lane, ivar, lvar))
3599 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3601 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3602 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3603 gimplify_and_add (x, ilist);
3604 gimple_stmt_iterator gsi
3605 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3606 gimple g
3607 = gimple_build_assign (unshare_expr (lvar), iv);
3608 gsi_insert_before_without_update (&gsi, g,
3609 GSI_SAME_STMT);
3610 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3611 enum tree_code code = PLUS_EXPR;
3612 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3613 code = POINTER_PLUS_EXPR;
3614 g = gimple_build_assign_with_ops (code, iv, iv, t);
3615 gsi_insert_before_without_update (&gsi, g,
3616 GSI_SAME_STMT);
3617 break;
3619 x = lang_hooks.decls.omp_clause_copy_ctor
3620 (c, unshare_expr (ivar), x);
3621 gimplify_and_add (x, &llist[0]);
3622 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3623 if (x)
3625 gimple_seq tseq = NULL;
3627 dtor = x;
3628 gimplify_stmt (&dtor, &tseq);
3629 gimple_seq_add_seq (&llist[1], tseq);
3631 break;
3634 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3635 gimplify_and_add (x, ilist);
3636 goto do_dtor;
3638 case OMP_CLAUSE__LOOPTEMP_:
3639 gcc_assert (is_parallel_ctx (ctx));
3640 x = build_outer_var_ref (var, ctx);
3641 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3642 gimplify_and_add (x, ilist);
3643 break;
3645 case OMP_CLAUSE_COPYIN:
3646 by_ref = use_pointer_for_field (var, NULL);
3647 x = build_receiver_ref (var, by_ref, ctx);
3648 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3649 append_to_statement_list (x, &copyin_seq);
3650 copyin_by_ref |= by_ref;
3651 break;
3653 case OMP_CLAUSE_REDUCTION:
3654 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3656 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3657 gimple tseq;
3658 x = build_outer_var_ref (var, ctx);
3660 if (is_reference (var)
3661 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3662 TREE_TYPE (x)))
3663 x = build_fold_addr_expr_loc (clause_loc, x);
3664 SET_DECL_VALUE_EXPR (placeholder, x);
3665 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3666 tree new_vard = new_var;
3667 if (is_reference (var))
3669 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3670 new_vard = TREE_OPERAND (new_var, 0);
3671 gcc_assert (DECL_P (new_vard));
3673 if (is_simd
3674 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3675 idx, lane, ivar, lvar))
3677 if (new_vard == new_var)
3679 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3680 SET_DECL_VALUE_EXPR (new_var, ivar);
3682 else
3684 SET_DECL_VALUE_EXPR (new_vard,
3685 build_fold_addr_expr (ivar));
3686 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3688 x = lang_hooks.decls.omp_clause_default_ctor
3689 (c, unshare_expr (ivar),
3690 build_outer_var_ref (var, ctx));
3691 if (x)
3692 gimplify_and_add (x, &llist[0]);
3693 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3695 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3696 lower_omp (&tseq, ctx);
3697 gimple_seq_add_seq (&llist[0], tseq);
3699 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3700 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3701 lower_omp (&tseq, ctx);
3702 gimple_seq_add_seq (&llist[1], tseq);
3703 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3704 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3705 if (new_vard == new_var)
3706 SET_DECL_VALUE_EXPR (new_var, lvar);
3707 else
3708 SET_DECL_VALUE_EXPR (new_vard,
3709 build_fold_addr_expr (lvar));
3710 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3711 if (x)
3713 tseq = NULL;
3714 dtor = x;
3715 gimplify_stmt (&dtor, &tseq);
3716 gimple_seq_add_seq (&llist[1], tseq);
3718 break;
3720	      /* If this is a reference to a constant-size reduction var
3721		 with a placeholder, we haven't emitted the initializer
3722		 for it yet, because doing so is undesirable if SIMD
3723		 arrays are used.  But if they aren't used, we need to
3724		 emit the deferred initialization now.  */
3725 else if (is_reference (var) && is_simd)
3726 handle_simd_reference (clause_loc, new_vard, ilist);
3727 x = lang_hooks.decls.omp_clause_default_ctor
3728 (c, unshare_expr (new_var),
3729 build_outer_var_ref (var, ctx));
3730 if (x)
3731 gimplify_and_add (x, ilist);
3732 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3734 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3735 lower_omp (&tseq, ctx);
3736 gimple_seq_add_seq (ilist, tseq);
3738 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3739 if (is_simd)
3741 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3742 lower_omp (&tseq, ctx);
3743 gimple_seq_add_seq (dlist, tseq);
3744 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3746 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3747 goto do_dtor;
3749 else
3751 x = omp_reduction_init (c, TREE_TYPE (new_var));
3752 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3753 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3755 /* reduction(-:var) sums up the partial results, so it
3756 acts identically to reduction(+:var). */
3757 if (code == MINUS_EXPR)
3758 code = PLUS_EXPR;
3760 tree new_vard = new_var;
3761 if (is_simd && is_reference (var))
3763 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3764 new_vard = TREE_OPERAND (new_var, 0);
3765 gcc_assert (DECL_P (new_vard));
3767 if (is_simd
3768 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3769 idx, lane, ivar, lvar))
3771 tree ref = build_outer_var_ref (var, ctx);
3773 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3775 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3776 ref = build_outer_var_ref (var, ctx);
3777 gimplify_assign (ref, x, &llist[1]);
3779 if (new_vard != new_var)
3781 SET_DECL_VALUE_EXPR (new_vard,
3782 build_fold_addr_expr (lvar));
3783 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3786 else
3788 if (is_reference (var) && is_simd)
3789 handle_simd_reference (clause_loc, new_vard, ilist);
3790 gimplify_assign (new_var, x, ilist);
3791 if (is_simd)
3793 tree ref = build_outer_var_ref (var, ctx);
3795 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3796 ref = build_outer_var_ref (var, ctx);
3797 gimplify_assign (ref, x, dlist);
3801 break;
3803 default:
3804 gcc_unreachable ();
3809 if (lane)
3811 tree uid = create_tmp_var (ptr_type_node, "simduid");
3812       /* Don't warn about simduid being uninitialized; it always is,
3813	  since we use it only for its DECL_UID, never for its value.  */
3814 TREE_NO_WARNING (uid) = 1;
3815 gimple g
3816 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3817 gimple_call_set_lhs (g, lane);
3818 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3819 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3820 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3821 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3822 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3823 gimple_omp_for_set_clauses (ctx->stmt, c);
3824 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3825 build_int_cst (unsigned_type_node, 0),
3826 NULL_TREE);
3827 gimple_seq_add_stmt (ilist, g);
3828 for (int i = 0; i < 2; i++)
3829 if (llist[i])
3831 tree vf = create_tmp_var (unsigned_type_node, NULL);
3832 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3833 gimple_call_set_lhs (g, vf);
3834 gimple_seq *seq = i == 0 ? ilist : dlist;
3835 gimple_seq_add_stmt (seq, g);
3836 tree t = build_int_cst (unsigned_type_node, 0);
3837 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3838 gimple_seq_add_stmt (seq, g);
3839 tree body = create_artificial_label (UNKNOWN_LOCATION);
3840 tree header = create_artificial_label (UNKNOWN_LOCATION);
3841 tree end = create_artificial_label (UNKNOWN_LOCATION);
3842 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3843 gimple_seq_add_stmt (seq, gimple_build_label (body));
3844 gimple_seq_add_seq (seq, llist[i]);
3845 t = build_int_cst (unsigned_type_node, 1);
3846 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3847 gimple_seq_add_stmt (seq, g);
3848 gimple_seq_add_stmt (seq, gimple_build_label (header));
3849 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3850 gimple_seq_add_stmt (seq, g);
3851 gimple_seq_add_stmt (seq, gimple_build_label (end));
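/* The sequence just built for each non-empty llist has the shape

     idx = 0;
     goto header;
   body:
     <llist[i] statements, indexing the simd arrays with idx>
     idx = idx + 1;
   header:
     if (idx < vf) goto body; else goto end;
   end:

   where vf is the IFN_GOMP_SIMD_VF value queried above.  */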
3855   /* The copyin sequence is not to be executed by the main thread, since
3856      that would result in self-copies.  For scalars that may not be
3857      observable, but for C++ operator= it certainly is.  */
3858 if (copyin_seq)
3860 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3862 x = build2 (NE_EXPR, boolean_type_node, x,
3863 build_int_cst (TREE_TYPE (x), 0));
3864 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3865 gimplify_and_add (x, ilist);
3868 /* If any copyin variable is passed by reference, we must ensure the
3869 master thread doesn't modify it before it is copied over in all
3870 threads. Similarly for variables in both firstprivate and
3871 lastprivate clauses we need to ensure the lastprivate copying
3872 happens after firstprivate copying in all threads. And similarly
3873 for UDRs if the initializer expression refers to omp_orig. */
3874 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3876 /* Don't add any barrier for #pragma omp simd or
3877 #pragma omp distribute. */
3878 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3879 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3880 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3883 /* If max_vf is non-zero, then we can use only a vectorization factor
3884 up to the max_vf we chose. So stick it into the safelen clause. */
3885 if (max_vf)
3887 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3888 OMP_CLAUSE_SAFELEN);
3889 if (c == NULL_TREE
3890 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
3891 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3892 max_vf) == 1))
3894 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3895 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3896 max_vf);
3897 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3898 gimple_omp_for_set_clauses (ctx->stmt, c);
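/* E.g. if max_vf ended up as 4, a pre-existing safelen(16) clause (or
   a missing safelen clause) is effectively overridden by the new
   safelen(4) prepended here, so the vectorizer never picks a
   vectorization factor larger than the privatized simd arrays were
   sized for (an illustration with hypothetical numbers).  */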
3904 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3905 both parallel and workshare constructs. PREDICATE may be NULL if it's
3906 always true. */
3908 static void
3909 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3910 omp_context *ctx)
3912 tree x, c, label = NULL, orig_clauses = clauses;
3913 bool par_clauses = false;
3914 tree simduid = NULL, lastlane = NULL;
3916 /* Early exit if there are no lastprivate or linear clauses. */
3917 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3918 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3919 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3920 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3921 break;
3922 if (clauses == NULL)
3924 /* If this was a workshare clause, see if it had been combined
3925 with its parallel. In that case, look for the clauses on the
3926 parallel statement itself. */
3927 if (is_parallel_ctx (ctx))
3928 return;
3930 ctx = ctx->outer;
3931 if (ctx == NULL || !is_parallel_ctx (ctx))
3932 return;
3934 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3935 OMP_CLAUSE_LASTPRIVATE);
3936 if (clauses == NULL)
3937 return;
3938 par_clauses = true;
3941 if (predicate)
3943 gimple stmt;
3944 tree label_true, arm1, arm2;
3946 label = create_artificial_label (UNKNOWN_LOCATION);
3947 label_true = create_artificial_label (UNKNOWN_LOCATION);
3948 arm1 = TREE_OPERAND (predicate, 0);
3949 arm2 = TREE_OPERAND (predicate, 1);
3950 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3951 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3952 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3953 label_true, label);
3954 gimple_seq_add_stmt (stmt_list, stmt);
3955 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3958 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3959 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3961 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3962 if (simduid)
3963 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3966 for (c = clauses; c ;)
3968 tree var, new_var;
3969 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3971 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3972 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3973 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3975 var = OMP_CLAUSE_DECL (c);
3976 new_var = lookup_decl (var, ctx);
3978 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3980 tree val = DECL_VALUE_EXPR (new_var);
3981 if (TREE_CODE (val) == ARRAY_REF
3982 && VAR_P (TREE_OPERAND (val, 0))
3983 && lookup_attribute ("omp simd array",
3984 DECL_ATTRIBUTES (TREE_OPERAND (val,
3985 0))))
3987 if (lastlane == NULL)
3989 lastlane = create_tmp_var (unsigned_type_node, NULL);
3990 gimple g
3991 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3992 2, simduid,
3993 TREE_OPERAND (val, 1));
3994 gimple_call_set_lhs (g, lastlane);
3995 gimple_seq_add_stmt (stmt_list, g);
3997 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3998 TREE_OPERAND (val, 0), lastlane,
3999 NULL_TREE, NULL_TREE);
4003 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4004 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
4006 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
4007 gimple_seq_add_seq (stmt_list,
4008 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
4009 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
4011 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4012 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
4014 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
4015 gimple_seq_add_seq (stmt_list,
4016 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
4017 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
4020 x = build_outer_var_ref (var, ctx);
4021 if (is_reference (var))
4022 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4023 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
4024 gimplify_and_add (x, stmt_list);
4026 c = OMP_CLAUSE_CHAIN (c);
4027 if (c == NULL && !par_clauses)
4029 /* If this was a workshare clause, see if it had been combined
4030 with its parallel. In that case, continue looking for the
4031 clauses also on the parallel statement itself. */
4032 if (is_parallel_ctx (ctx))
4033 break;
4035 ctx = ctx->outer;
4036 if (ctx == NULL || !is_parallel_ctx (ctx))
4037 break;
4039 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4040 OMP_CLAUSE_LASTPRIVATE);
4041 par_clauses = true;
4045 if (label)
4046 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
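/* A sketch of the user-level semantics implemented above:

     #pragma omp for lastprivate(x)
     for (i = 0; i < n; i++)
       x = f (i);

   Each thread works on a private copy of X; the copy-out emitted here
   assigns that private copy back to the original variable and, when
   PREDICATE is non-NULL, is guarded so that only the thread executing
   the sequentially last iteration performs it.  */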
4050 /* Generate code to implement the REDUCTION clauses. */
4052 static void
4053 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
4055 gimple_seq sub_seq = NULL;
4056 gimple stmt;
4057 tree x, c;
4058 int count = 0;
4060 /* SIMD reductions are handled in lower_rec_input_clauses. */
4061 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4062 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4063 return;
4065 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4066 update in that case, otherwise use a lock. */
4067 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
4068 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
4070 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4072 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4073 count = -1;
4074 break;
4076 count++;
4079 if (count == 0)
4080 return;
4082 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4084 tree var, ref, new_var;
4085 enum tree_code code;
4086 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4088 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
4089 continue;
4091 var = OMP_CLAUSE_DECL (c);
4092 new_var = lookup_decl (var, ctx);
4093 if (is_reference (var))
4094 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4095 ref = build_outer_var_ref (var, ctx);
4096 code = OMP_CLAUSE_REDUCTION_CODE (c);
4098 /* reduction(-:var) sums up the partial results, so it acts
4099 identically to reduction(+:var). */
4100 if (code == MINUS_EXPR)
4101 code = PLUS_EXPR;
4103 if (count == 1)
4105 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
4107 addr = save_expr (addr);
4108 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
4109 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
4110 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
4111 gimplify_and_add (x, stmt_seqp);
4112 return;
4115 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4117 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4119 if (is_reference (var)
4120 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4121 TREE_TYPE (ref)))
4122 ref = build_fold_addr_expr_loc (clause_loc, ref);
4123 SET_DECL_VALUE_EXPR (placeholder, ref);
4124 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4125 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
4126 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
4127 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4128 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
4130 else
4132 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4133 ref = build_outer_var_ref (var, ctx);
4134 gimplify_assign (ref, x, &sub_seq);
4138 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4139 0);
4140 gimple_seq_add_stmt (stmt_seqp, stmt);
4142 gimple_seq_add_seq (stmt_seqp, sub_seq);
4144 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4145 0);
4146 gimple_seq_add_stmt (stmt_seqp, stmt);
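/* For example, given

     #pragma omp parallel for reduction(+:sum)

   a single scalar reduction is merged with one atomic update,
   conceptually

     #pragma omp atomic
     sum = sum + sum_private;

   whereas multiple reductions (or array/UDR ones) are merged inside
   the GOMP_atomic_start ()/GOMP_atomic_end () critical region built
   above (a sketch of the two code shapes).  */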
4150 /* Generate code to implement the COPYPRIVATE clauses. */
4152 static void
4153 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4154 omp_context *ctx)
4156 tree c;
4158 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4160 tree var, new_var, ref, x;
4161 bool by_ref;
4162 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4164 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4165 continue;
4167 var = OMP_CLAUSE_DECL (c);
4168 by_ref = use_pointer_for_field (var, NULL);
4170 ref = build_sender_ref (var, ctx);
4171 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4172 if (by_ref)
4174 x = build_fold_addr_expr_loc (clause_loc, new_var);
4175 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4177 gimplify_assign (ref, x, slist);
4179 ref = build_receiver_ref (var, false, ctx);
4180 if (by_ref)
4182 ref = fold_convert_loc (clause_loc,
4183 build_pointer_type (TREE_TYPE (new_var)),
4184 ref);
4185 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4187 if (is_reference (var))
4189 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4190 ref = build_simple_mem_ref_loc (clause_loc, ref);
4191 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4193 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4194 gimplify_and_add (x, rlist);
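/* A sketch of the semantics implemented above:

     #pragma omp single copyprivate(x)
     x = compute ();

   The thread that executed the single region stores X (or its
   address, when BY_REF) into the sender record via SLIST, and after
   the broadcast every other thread copies the value back out via the
   receiver reference in RLIST.  */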
4199 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4200 and REDUCTION from the sender (aka parent) side. */
4202 static void
4203 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4204 omp_context *ctx)
4206 tree c;
4208 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4210 tree val, ref, x, var;
4211 bool by_ref, do_in = false, do_out = false;
4212 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4214 switch (OMP_CLAUSE_CODE (c))
4216 case OMP_CLAUSE_PRIVATE:
4217 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4218 break;
4219 continue;
4220 case OMP_CLAUSE_FIRSTPRIVATE:
4221 case OMP_CLAUSE_COPYIN:
4222 case OMP_CLAUSE_LASTPRIVATE:
4223 case OMP_CLAUSE_REDUCTION:
4224 case OMP_CLAUSE__LOOPTEMP_:
4225 break;
4226 default:
4227 continue;
4230 val = OMP_CLAUSE_DECL (c);
4231 var = lookup_decl_in_outer_ctx (val, ctx);
4233 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4234 && is_global_var (var))
4235 continue;
4236 if (is_variable_sized (val))
4237 continue;
4238 by_ref = use_pointer_for_field (val, NULL);
4240 switch (OMP_CLAUSE_CODE (c))
4242 case OMP_CLAUSE_PRIVATE:
4243 case OMP_CLAUSE_FIRSTPRIVATE:
4244 case OMP_CLAUSE_COPYIN:
4245 case OMP_CLAUSE__LOOPTEMP_:
4246 do_in = true;
4247 break;
4249 case OMP_CLAUSE_LASTPRIVATE:
4250 if (by_ref || is_reference (val))
4252 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4253 continue;
4254 do_in = true;
4256 else
4258 do_out = true;
4259 if (lang_hooks.decls.omp_private_outer_ref (val))
4260 do_in = true;
4262 break;
4264 case OMP_CLAUSE_REDUCTION:
4265 do_in = true;
4266 do_out = !(by_ref || is_reference (val));
4267 break;
4269 default:
4270 gcc_unreachable ();
4273 if (do_in)
4275 ref = build_sender_ref (val, ctx);
4276 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4277 gimplify_assign (ref, x, ilist);
4278 if (is_task_ctx (ctx))
4279 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4282 if (do_out)
4284 ref = build_sender_ref (val, ctx);
4285 gimplify_assign (var, ref, olist);
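/* Conceptually, for firstprivate(x) the DO_IN store emitted above is
   the parent-side marshalling assignment

     .omp_data_o.x = x;        (or the address &x when BY_REF)

   and for a by-value lastprivate(x) the DO_OUT load is the matching

     x = .omp_data_o.x;

   after the region completes (a sketch; .omp_data_o stands for the
   sender record built for this region).  */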
4290 /* Generate code to implement SHARED from the sender (aka parent)
4291 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4292 list things that got automatically shared. */
4294 static void
4295 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4297 tree var, ovar, nvar, f, x, record_type;
4299 if (ctx->record_type == NULL)
4300 return;
4302 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4303 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4305 ovar = DECL_ABSTRACT_ORIGIN (f);
4306 nvar = maybe_lookup_decl (ovar, ctx);
4307 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4308 continue;
4310 /* If CTX is a nested parallel directive, find the immediately
4311 enclosing parallel or workshare construct that contains a
4312 mapping for OVAR. */
4313 var = lookup_decl_in_outer_ctx (ovar, ctx);
4315 if (use_pointer_for_field (ovar, ctx))
4317 x = build_sender_ref (ovar, ctx);
4318 var = build_fold_addr_expr (var);
4319 gimplify_assign (x, var, ilist);
4321 else
4323 x = build_sender_ref (ovar, ctx);
4324 gimplify_assign (x, var, ilist);
4326 if (!TREE_READONLY (var)
4327 /* We don't need to receive a new reference to a result
4328 or parm decl. In fact we must not store to it, as we would
4329 invalidate any pending RSO and generate wrong gimple
4330 during inlining. */
4331 && !((TREE_CODE (var) == RESULT_DECL
4332 || TREE_CODE (var) == PARM_DECL)
4333 && DECL_BY_REFERENCE (var)))
4335 x = build_sender_ref (ovar, ctx);
4336 gimplify_assign (var, x, olist);
4343 /* A convenience function to build an empty GIMPLE_COND with just the
4344 condition. */
4346 static gimple_cond
4347 gimple_build_cond_empty (tree cond)
4349 enum tree_code pred_code;
4350 tree lhs, rhs;
4352 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4353 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4357 /* Build the function calls to GOMP_parallel_start etc to actually
4358 generate the parallel operation. REGION is the parallel region
4359 being expanded. BB is the block where to insert the code. WS_ARGS
4360 will be set if this is a call to a combined parallel+workshare
4361 construct, it contains the list of additional arguments needed by
4362 the workshare construct. */
4364 static void
4365 expand_parallel_call (struct omp_region *region, basic_block bb,
4366 gimple_omp_parallel entry_stmt,
4367 vec<tree, va_gc> *ws_args)
4369 tree t, t1, t2, val, cond, c, clauses, flags;
4370 gimple_stmt_iterator gsi;
4371 gimple stmt;
4372 enum built_in_function start_ix;
4373 int start_ix2;
4374 location_t clause_loc;
4375 vec<tree, va_gc> *args;
4377 clauses = gimple_omp_parallel_clauses (entry_stmt);
4379 /* Determine what flavor of GOMP_parallel we will be
4380 emitting. */
4381 start_ix = BUILT_IN_GOMP_PARALLEL;
4382 if (is_combined_parallel (region))
4384 switch (region->inner->type)
4386 case GIMPLE_OMP_FOR:
4387 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4388 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4389 + (region->inner->sched_kind
4390 == OMP_CLAUSE_SCHEDULE_RUNTIME
4391 ? 3 : region->inner->sched_kind));
4392 start_ix = (enum built_in_function)start_ix2;
4393 break;
4394 case GIMPLE_OMP_SECTIONS:
4395 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4396 break;
4397 default:
4398 gcc_unreachable ();
4402 /* By default, the value of NUM_THREADS is zero (selected at run time)
4403 and there is no conditional. */
4404 cond = NULL_TREE;
4405 val = build_int_cst (unsigned_type_node, 0);
4406 flags = build_int_cst (unsigned_type_node, 0);
4408 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4409 if (c)
4410 cond = OMP_CLAUSE_IF_EXPR (c);
4412 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4413 if (c)
4415 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4416 clause_loc = OMP_CLAUSE_LOCATION (c);
4418 else
4419 clause_loc = gimple_location (entry_stmt);
4421 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4422 if (c)
4423 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4425 /* Ensure 'val' is of the correct type. */
4426 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4428 /* If we found the clause 'if (cond)', build either
4429 (cond != 0) or (cond ? val : 1u). */
4430 if (cond)
4432 cond = gimple_boolify (cond);
4434 if (integer_zerop (val))
4435 val = fold_build2_loc (clause_loc,
4436 EQ_EXPR, unsigned_type_node, cond,
4437 build_int_cst (TREE_TYPE (cond), 0));
4438 else
4440 basic_block cond_bb, then_bb, else_bb;
4441 edge e, e_then, e_else;
4442 tree tmp_then, tmp_else, tmp_join, tmp_var;
4444 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4445 if (gimple_in_ssa_p (cfun))
4447 tmp_then = make_ssa_name (tmp_var, NULL);
4448 tmp_else = make_ssa_name (tmp_var, NULL);
4449 tmp_join = make_ssa_name (tmp_var, NULL);
4451 else
4453 tmp_then = tmp_var;
4454 tmp_else = tmp_var;
4455 tmp_join = tmp_var;
4458 e = split_block (bb, NULL);
4459 cond_bb = e->src;
4460 bb = e->dest;
4461 remove_edge (e);
4463 then_bb = create_empty_bb (cond_bb);
4464 else_bb = create_empty_bb (then_bb);
4465 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4466 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4468 stmt = gimple_build_cond_empty (cond);
4469 gsi = gsi_start_bb (cond_bb);
4470 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4472 gsi = gsi_start_bb (then_bb);
4473 stmt = gimple_build_assign (tmp_then, val);
4474 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4476 gsi = gsi_start_bb (else_bb);
4477 stmt = gimple_build_assign
4478 (tmp_else, build_int_cst (unsigned_type_node, 1));
4479 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4481 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4482 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4483 add_bb_to_loop (then_bb, cond_bb->loop_father);
4484 add_bb_to_loop (else_bb, cond_bb->loop_father);
4485 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4486 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4488 if (gimple_in_ssa_p (cfun))
4490 gimple_phi phi = create_phi_node (tmp_join, bb);
4491 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4492 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4495 val = tmp_join;
4498 gsi = gsi_start_bb (bb);
4499 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4500 false, GSI_CONTINUE_LINKING);
4503 gsi = gsi_last_bb (bb);
4504 t = gimple_omp_parallel_data_arg (entry_stmt);
4505 if (t == NULL)
4506 t1 = null_pointer_node;
4507 else
4508 t1 = build_fold_addr_expr (t);
4509 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4511 vec_alloc (args, 4 + vec_safe_length (ws_args));
4512 args->quick_push (t2);
4513 args->quick_push (t1);
4514 args->quick_push (val);
4515 if (ws_args)
4516 args->splice (*ws_args);
4517 args->quick_push (flags);
4519 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4520 builtin_decl_explicit (start_ix), args);
4522 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4523 false, GSI_CONTINUE_LINKING);
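/* The net effect for a plain #pragma omp parallel if(c) num_threads(n)
   is a single runtime call, conceptually

     GOMP_parallel (child_fn, &.omp_data_o, c ? n : 1, flags);

   with WS_ARGS spliced in between the thread count and FLAGS when a
   combined parallel+workshare entry point is used (a sketch of the
   call shape built above).  */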
4526 /* Insert a function call whose name is FUNC_NAME with the information from
4527 ENTRY_STMT into the basic_block BB. */
4529 static void
4530 expand_cilk_for_call (basic_block bb, gimple_omp_parallel entry_stmt,
4531 vec <tree, va_gc> *ws_args)
4533 tree t, t1, t2;
4534 gimple_stmt_iterator gsi;
4535 vec <tree, va_gc> *args;
4537 gcc_assert (vec_safe_length (ws_args) == 2);
4538 tree func_name = (*ws_args)[0];
4539 tree grain = (*ws_args)[1];
4541 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
4542 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
4543 gcc_assert (count != NULL_TREE);
4544 count = OMP_CLAUSE_OPERAND (count, 0);
4546 gsi = gsi_last_bb (bb);
4547 t = gimple_omp_parallel_data_arg (entry_stmt);
4548 if (t == NULL)
4549 t1 = null_pointer_node;
4550 else
4551 t1 = build_fold_addr_expr (t);
4552 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4554 vec_alloc (args, 4);
4555 args->quick_push (t2);
4556 args->quick_push (t1);
4557 args->quick_push (count);
4558 args->quick_push (grain);
4559 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
4561 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
4562 GSI_CONTINUE_LINKING);
4565 /* Build the function call to GOMP_task to actually
4566 generate the task operation. BB is the block where to insert the code. */
4568 static void
4569 expand_task_call (basic_block bb, gimple entry_stmt)
4571 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4572 gimple_stmt_iterator gsi;
4573 location_t loc = gimple_location (entry_stmt);
4575 clauses = gimple_omp_task_clauses (entry_stmt);
4577 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4578 if (c)
4579 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4580 else
4581 cond = boolean_true_node;
4583 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4584 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4585 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4586 flags = build_int_cst (unsigned_type_node,
4587 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4589 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4590 if (c)
4592 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4593 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4594 build_int_cst (unsigned_type_node, 2),
4595 build_int_cst (unsigned_type_node, 0));
4596 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4598 if (depend)
4599 depend = OMP_CLAUSE_DECL (depend);
4600 else
4601 depend = build_int_cst (ptr_type_node, 0);
4603 gsi = gsi_last_bb (bb);
4604 t = gimple_omp_task_data_arg (entry_stmt);
4605 if (t == NULL)
4606 t2 = null_pointer_node;
4607 else
4608 t2 = build_fold_addr_expr_loc (loc, t);
4609 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4610 t = gimple_omp_task_copy_fn (entry_stmt);
4611 if (t == NULL)
4612 t3 = null_pointer_node;
4613 else
4614 t3 = build_fold_addr_expr_loc (loc, t);
4616 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4617 8, t1, t2, t3,
4618 gimple_omp_task_arg_size (entry_stmt),
4619 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4620 depend);
4622 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4623 false, GSI_CONTINUE_LINKING);
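/* A sketch of the call built above for #pragma omp task:

     GOMP_task (child_fn, &.omp_data_o, copy_fn,
                arg_size, arg_align, if_cond, flags, depend);

   where FLAGS packs untied (bit 0), final (bit 1), mergeable (bit 2)
   and the presence of depend clauses (bit 3).  */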
4627 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4628 catch handler and return it. This prevents programs from violating the
4629 structured block semantics with throws. */
4631 static gimple_seq
4632 maybe_catch_exception (gimple_seq body)
4634 gimple g;
4635 tree decl;
4637 if (!flag_exceptions)
4638 return body;
4640 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4641 decl = lang_hooks.eh_protect_cleanup_actions ();
4642 else
4643 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4645 g = gimple_build_eh_must_not_throw (decl);
4646 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4647 GIMPLE_TRY_CATCH);
4649 return gimple_seq_alloc_with_stmt (g);
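/* Structurally this wraps the region body as if by

     try { BODY } catch (...) { terminate (); }

   (a sketch: the handler is the language's cleanup action, e.g.
   std::terminate for C++, or __builtin_trap as a fallback).  */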
4652 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
4654 static tree
4655 vec2chain (vec<tree, va_gc> *v)
4657 tree chain = NULL_TREE, t;
4658 unsigned ix;
4660 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4662 DECL_CHAIN (t) = chain;
4663 chain = t;
4666 return chain;
4670 /* Remove barriers in REGION->EXIT's block. Note that this is only
4671 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4672 is an implicit barrier, any barrier that a workshare inside the
4673 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL
4674 region can now be removed. */
4676 static void
4677 remove_exit_barrier (struct omp_region *region)
4679 gimple_stmt_iterator gsi;
4680 basic_block exit_bb;
4681 edge_iterator ei;
4682 edge e;
4683 gimple stmt;
4684 int any_addressable_vars = -1;
4686 exit_bb = region->exit;
4688 /* If the parallel region doesn't return, we don't have REGION->EXIT
4689 block at all. */
4690 if (! exit_bb)
4691 return;
4693 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4694 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4695 statements that can appear in between are extremely limited -- no
4696 memory operations at all. Here, we allow nothing at all, so the
4697 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4698 gsi = gsi_last_bb (exit_bb);
4699 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4700 gsi_prev (&gsi);
4701 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4702 return;
4704 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4706 gsi = gsi_last_bb (e->src);
4707 if (gsi_end_p (gsi))
4708 continue;
4709 stmt = gsi_stmt (gsi);
4710 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4711 && !gimple_omp_return_nowait_p (stmt))
4713 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4714 in many cases. If there could be tasks queued, the barrier
4715 might be needed to let the tasks run before some local
4716 variable of the parallel that the task uses as shared
4717 runs out of scope. The task can be spawned either
4718 from within the current function (this would be easy to check)
4719 or from some function it calls and gets passed an address
4720 of such a variable. */
4721 if (any_addressable_vars < 0)
4723 gimple_omp_parallel parallel_stmt =
4724 as_a <gimple_omp_parallel> (last_stmt (region->entry));
4725 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4726 tree local_decls, block, decl;
4727 unsigned ix;
4729 any_addressable_vars = 0;
4730 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4731 if (TREE_ADDRESSABLE (decl))
4733 any_addressable_vars = 1;
4734 break;
4736 for (block = gimple_block (stmt);
4737 !any_addressable_vars
4738 && block
4739 && TREE_CODE (block) == BLOCK;
4740 block = BLOCK_SUPERCONTEXT (block))
4742 for (local_decls = BLOCK_VARS (block);
4743 local_decls;
4744 local_decls = DECL_CHAIN (local_decls))
4745 if (TREE_ADDRESSABLE (local_decls))
4747 any_addressable_vars = 1;
4748 break;
4750 if (block == gimple_block (parallel_stmt))
4751 break;
4754 if (!any_addressable_vars)
4755 gimple_omp_return_set_nowait (stmt);
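/* An example of the hazard the ANY_ADDRESSABLE_VARS check guards
   against (a sketch):

     #pragma omp parallel
     {
       int a = 0;
       #pragma omp task shared(a)
       a = 1;
       #pragma omp for
       for (i = 0; i < n; i++)
         ...;
       ... the workshare barrier must stay, or A could go out of
       scope while the queued task still references it ...
     }
*/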
4760 static void
4761 remove_exit_barriers (struct omp_region *region)
4763 if (region->type == GIMPLE_OMP_PARALLEL)
4764 remove_exit_barrier (region);
4766 if (region->inner)
4768 region = region->inner;
4769 remove_exit_barriers (region);
4770 while (region->next)
4772 region = region->next;
4773 remove_exit_barriers (region);
4778 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4779 calls. These can't be declared as const functions, but
4780 within one parallel body they are constant, so they can be
4781 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4782 which are declared const. Similarly for the task body, except
4783 that in an untied task omp_get_thread_num () can change at any
4784 task scheduling point. */
4786 static void
4787 optimize_omp_library_calls (gimple entry_stmt)
4789 basic_block bb;
4790 gimple_stmt_iterator gsi;
4791 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4792 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4793 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4794 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4795 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4796 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4797 OMP_CLAUSE_UNTIED) != NULL);
4799 FOR_EACH_BB_FN (bb, cfun)
4800 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4802 gimple call = gsi_stmt (gsi);
4803 tree decl;
4805 if (is_gimple_call (call)
4806 && (decl = gimple_call_fndecl (call))
4807 && DECL_EXTERNAL (decl)
4808 && TREE_PUBLIC (decl)
4809 && DECL_INITIAL (decl) == NULL)
4811 tree built_in;
4813 if (DECL_NAME (decl) == thr_num_id)
4815 /* In #pragma omp task untied omp_get_thread_num () can change
4816 during the execution of the task region. */
4817 if (untied_task)
4818 continue;
4819 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4821 else if (DECL_NAME (decl) == num_thr_id)
4822 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4823 else
4824 continue;
4826 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4827 || gimple_call_num_args (call) != 0)
4828 continue;
4830 if (flag_exceptions && !TREE_NOTHROW (decl))
4831 continue;
4833 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4834 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4835 TREE_TYPE (TREE_TYPE (built_in))))
4836 continue;
4838 gimple_call_set_fndecl (call, built_in);
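/* I.e. within the outlined body a call such as

     n = omp_get_num_threads ();

   is redirected to the const-declared __builtin_omp_get_num_threads (),
   allowing the result to be CSEd across the parallel body.  The
   replacement only happens when the user-visible declaration is
   argument- and type-compatible with the builtin, as checked above.  */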
4843 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4844 regimplified. */
4846 static tree
4847 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4849 tree t = *tp;
4851 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4852 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4853 return t;
4855 if (TREE_CODE (t) == ADDR_EXPR)
4856 recompute_tree_invariant_for_addr_expr (t);
4858 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4859 return NULL_TREE;
4862 /* Prepend TO = FROM assignment before *GSI_P. */
4864 static void
4865 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4867 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4868 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4869 true, GSI_SAME_STMT);
4870 gimple stmt = gimple_build_assign (to, from);
4871 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4872 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4873 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4875 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4876 gimple_regimplify_operands (stmt, &gsi);
4880 /* Expand the OpenMP parallel or task directive starting at REGION. */
4882 static void
4883 expand_omp_taskreg (struct omp_region *region)
4885 basic_block entry_bb, exit_bb, new_bb;
4886 struct function *child_cfun;
4887 tree child_fn, block, t;
4888 gimple_stmt_iterator gsi;
4889 gimple entry_stmt, stmt;
4890 edge e;
4891 vec<tree, va_gc> *ws_args;
4893 entry_stmt = last_stmt (region->entry);
4894 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4895 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4897 entry_bb = region->entry;
4898 exit_bb = region->exit;
4900 bool is_cilk_for
4901 = (flag_cilkplus
4902 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
4903 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
4904 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
4906 if (is_cilk_for)
4907 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
4908 and the inner statement contains the name of the built-in function
4909 and grain. */
4910 ws_args = region->inner->ws_args;
4911 else if (is_combined_parallel (region))
4912 ws_args = region->ws_args;
4913 else
4914 ws_args = NULL;
4916 if (child_cfun->cfg)
4918 /* Due to inlining, it may happen that we have already outlined
4919 the region, in which case all we need to do is make the
4920 sub-graph unreachable and emit the parallel call. */
4921 edge entry_succ_e, exit_succ_e;
4923 entry_succ_e = single_succ_edge (entry_bb);
4925 gsi = gsi_last_bb (entry_bb);
4926 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4927 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4928 gsi_remove (&gsi, true);
4930 new_bb = entry_bb;
4931 if (exit_bb)
4933 exit_succ_e = single_succ_edge (exit_bb);
4934 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4936 remove_edge_and_dominated_blocks (entry_succ_e);
4938 else
4940 unsigned srcidx, dstidx, num;
4942 /* If the parallel region needs data sent from the parent
4943 function, then the very first statement (except possible
4944 tree profile counter updates) of the parallel body
4945 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4946 &.OMP_DATA_O is passed as an argument to the child function,
4947 we need to replace it with the argument as seen by the child
4948 function.
4950 In most cases, this will end up being the identity assignment
4951 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4952 a function call that has been inlined, the original PARM_DECL
4953 .OMP_DATA_I may have been converted into a different local
4954 variable, in which case we need to keep the assignment. */
4955 if (gimple_omp_taskreg_data_arg (entry_stmt))
4957 basic_block entry_succ_bb = single_succ (entry_bb);
4958 tree arg, narg;
4959 gimple parcopy_stmt = NULL;
4961 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4963 gimple stmt;
4965 gcc_assert (!gsi_end_p (gsi));
4966 stmt = gsi_stmt (gsi);
4967 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4968 continue;
4970 if (gimple_num_ops (stmt) == 2)
4972 tree arg = gimple_assign_rhs1 (stmt);
4974 /* We're ignoring the subcode because we're
4975 effectively doing a STRIP_NOPS. */
4977 if (TREE_CODE (arg) == ADDR_EXPR
4978 && TREE_OPERAND (arg, 0)
4979 == gimple_omp_taskreg_data_arg (entry_stmt))
4981 parcopy_stmt = stmt;
4982 break;
4987 gcc_assert (parcopy_stmt != NULL);
4988 arg = DECL_ARGUMENTS (child_fn);
4990 if (!gimple_in_ssa_p (cfun))
4992 if (gimple_assign_lhs (parcopy_stmt) == arg)
4993 gsi_remove (&gsi, true);
4994 else
4996 /* ?? Is setting the subcode really necessary ?? */
4997 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4998 gimple_assign_set_rhs1 (parcopy_stmt, arg);
5001 else
5003 /* If we are in ssa form, we must load the value from the default
5004 definition of the argument. That should not be defined now,
5005 since the argument is not used uninitialized. */
5006 gcc_assert (ssa_default_def (cfun, arg) == NULL);
5007 narg = make_ssa_name (arg, gimple_build_nop ());
5008 set_ssa_default_def (cfun, arg, narg);
5009 /* ?? Is setting the subcode really necessary ?? */
5010 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
5011 gimple_assign_set_rhs1 (parcopy_stmt, narg);
5012 update_stmt (parcopy_stmt);
5016 /* Declare local variables needed in CHILD_CFUN. */
5017 block = DECL_INITIAL (child_fn);
5018 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
5019 /* The gimplifier could record temporaries in parallel/task block
5020 rather than in the containing function's local_decls chain,
5021 which would mean cgraph missed finalizing them. Do it now. */
5022 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
5023 if (TREE_CODE (t) == VAR_DECL
5024 && TREE_STATIC (t)
5025 && !DECL_EXTERNAL (t))
5026 varpool_node::finalize_decl (t);
5027 DECL_SAVED_TREE (child_fn) = NULL;
5028 /* We'll create a CFG for child_fn, so no gimple body is needed. */
5029 gimple_set_body (child_fn, NULL);
5030 TREE_USED (block) = 1;
5032 /* Reset DECL_CONTEXT on function arguments. */
5033 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
5034 DECL_CONTEXT (t) = child_fn;
5036 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
5037 so that it can be moved to the child function. */
5038 gsi = gsi_last_bb (entry_bb);
5039 stmt = gsi_stmt (gsi);
5040 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
5041 || gimple_code (stmt) == GIMPLE_OMP_TASK));
5042 gsi_remove (&gsi, true);
5043 e = split_block (entry_bb, stmt);
5044 entry_bb = e->dest;
5045 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5047 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5048 if (exit_bb)
5050 gsi = gsi_last_bb (exit_bb);
5051 gcc_assert (!gsi_end_p (gsi)
5052 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5053 stmt = gimple_build_return (NULL);
5054 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5055 gsi_remove (&gsi, true);
5058 /* Move the parallel region into CHILD_CFUN. */
5060 if (gimple_in_ssa_p (cfun))
5062 init_tree_ssa (child_cfun);
5063 init_ssa_operands (child_cfun);
5064 child_cfun->gimple_df->in_ssa_p = true;
5065 block = NULL_TREE;
5067 else
5068 block = gimple_block (entry_stmt);
5070 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
5071 if (exit_bb)
5072 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
5073 /* When the OMP expansion process cannot guarantee an up-to-date
5074 loop tree, arrange for the child function to fix up loops. */
5075 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5076 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
5078 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5079 num = vec_safe_length (child_cfun->local_decls);
5080 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
5082 t = (*child_cfun->local_decls)[srcidx];
5083 if (DECL_CONTEXT (t) == cfun->decl)
5084 continue;
5085 if (srcidx != dstidx)
5086 (*child_cfun->local_decls)[dstidx] = t;
5087 dstidx++;
5089 if (dstidx != num)
5090 vec_safe_truncate (child_cfun->local_decls, dstidx);
5092 /* Inform the callgraph about the new function. */
5093 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
5094 cgraph_node::add_new_function (child_fn, true);
5096 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5097 fixed in a following pass. */
5098 push_cfun (child_cfun);
5099 if (optimize)
5100 optimize_omp_library_calls (entry_stmt);
5101 cgraph_edge::rebuild_edges ();
5103 /* Some EH regions might become dead, see PR34608. If
5104 pass_cleanup_cfg isn't the first pass to happen with the
5105 new child, these dead EH edges might cause problems.
5106 Clean them up now. */
5107 if (flag_exceptions)
5109 basic_block bb;
5110 bool changed = false;
5112 FOR_EACH_BB_FN (bb, cfun)
5113 changed |= gimple_purge_dead_eh_edges (bb);
5114 if (changed)
5115 cleanup_tree_cfg ();
5117 if (gimple_in_ssa_p (cfun))
5118 update_ssa (TODO_update_ssa);
5119 pop_cfun ();
5122 /* Emit a library call to launch the children threads. */
5123 if (is_cilk_for)
5124 expand_cilk_for_call (new_bb,
5125 as_a <gimple_omp_parallel> (entry_stmt), ws_args);
5126 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5127 expand_parallel_call (region, new_bb,
5128 as_a <gimple_omp_parallel> (entry_stmt), ws_args);
5129 else
5130 expand_task_call (new_bb, entry_stmt);
5131 if (gimple_in_ssa_p (cfun))
5132 update_ssa (TODO_update_ssa_only_virtuals);
5136 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5137 of the combined collapse > 1 loop constructs, generate code like:
5138 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5139 if (cond3 is <)
5140 adj = STEP3 - 1;
5141 else
5142 adj = STEP3 + 1;
5143 count3 = (adj + N32 - N31) / STEP3;
5144 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5145 if (cond2 is <)
5146 adj = STEP2 - 1;
5147 else
5148 adj = STEP2 + 1;
5149 count2 = (adj + N22 - N21) / STEP2;
5150 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5151 if (cond1 is <)
5152 adj = STEP1 - 1;
5153 else
5154 adj = STEP1 + 1;
5155 count1 = (adj + N12 - N11) / STEP1;
5156 count = count1 * count2 * count3;
5157 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5158 count = 0;
5159 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5160 of the combined loop constructs, just initialize COUNTS array
5161 from the _looptemp_ clauses. */
5163 /* NOTE: It *could* be better to moosh all of the BBs together,
5164 creating one larger BB with all the computation and the unexpected
5165 jump at the end. I.e.
5167 bool zero3, zero2, zero1, zero;
5169 zero3 = N32 c3 N31;
5170 count3 = (N32 - N31) /[cl] STEP3;
5171 zero2 = N22 c2 N21;
5172 count2 = (N22 - N21) /[cl] STEP2;
5173 zero1 = N12 c1 N11;
5174 count1 = (N12 - N11) /[cl] STEP1;
5175 zero = zero3 || zero2 || zero1;
5176 count = count1 * count2 * count3;
5177 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5179 After all, we expect the zero=false, and thus we expect to have to
5180 evaluate all of the comparison expressions, so short-circuiting
5181 oughtn't be a win. Since the condition isn't protecting a
5182 denominator, we're not concerned about divide-by-zero, so we can
5183 fully evaluate count even if a numerator turned out to be wrong.
5185 It seems like putting this all together would create much better
5186 scheduling opportunities, and less pressure on the chip's branch
5187 predictor. */
5189 static void
5190 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5191 basic_block &entry_bb, tree *counts,
5192 basic_block &zero_iter_bb, int &first_zero_iter,
5193 basic_block &l2_dom_bb)
5195 tree t, type = TREE_TYPE (fd->loop.v);
5196 gimple stmt;
5197 edge e, ne;
5198 int i;
5200 /* Collapsed loops need work for expansion into SSA form. */
5201 gcc_assert (!gimple_in_ssa_p (cfun));
5203 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5204 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5206 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5207 isn't supposed to be handled, as the inner loop doesn't
5208 use it. */
5209 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5210 OMP_CLAUSE__LOOPTEMP_);
5211 gcc_assert (innerc);
5212 for (i = 0; i < fd->collapse; i++)
5214 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5215 OMP_CLAUSE__LOOPTEMP_);
5216 gcc_assert (innerc);
5217 if (i)
5218 counts[i] = OMP_CLAUSE_DECL (innerc);
5219 else
5220 counts[0] = NULL_TREE;
5222 return;
5225 for (i = 0; i < fd->collapse; i++)
5227 tree itype = TREE_TYPE (fd->loops[i].v);
5229 if (SSA_VAR_P (fd->loop.n2)
5230 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5231 fold_convert (itype, fd->loops[i].n1),
5232 fold_convert (itype, fd->loops[i].n2)))
5233 == NULL_TREE || !integer_onep (t)))
5235 tree n1, n2;
5236 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5237 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5238 true, GSI_SAME_STMT);
5239 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5240 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5241 true, GSI_SAME_STMT);
5242 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5243 NULL_TREE, NULL_TREE);
5244 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5245 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5246 expand_omp_regimplify_p, NULL, NULL)
5247 || walk_tree (gimple_cond_rhs_ptr (stmt),
5248 expand_omp_regimplify_p, NULL, NULL))
5250 *gsi = gsi_for_stmt (stmt);
5251 gimple_regimplify_operands (stmt, gsi);
5253 e = split_block (entry_bb, stmt);
5254 if (zero_iter_bb == NULL)
5256 first_zero_iter = i;
5257 zero_iter_bb = create_empty_bb (entry_bb);
5258 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5259 *gsi = gsi_after_labels (zero_iter_bb);
5260 stmt = gimple_build_assign (fd->loop.n2,
5261 build_zero_cst (type));
5262 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5263 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5264 entry_bb);
5266 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5267 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5268 e->flags = EDGE_TRUE_VALUE;
5269 e->probability = REG_BR_PROB_BASE - ne->probability;
5270 if (l2_dom_bb == NULL)
5271 l2_dom_bb = entry_bb;
5272 entry_bb = e->dest;
5273 *gsi = gsi_last_bb (entry_bb);
5276 if (POINTER_TYPE_P (itype))
5277 itype = signed_type_for (itype);
5278 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5279 ? -1 : 1));
5280 t = fold_build2 (PLUS_EXPR, itype,
5281 fold_convert (itype, fd->loops[i].step), t);
5282 t = fold_build2 (PLUS_EXPR, itype, t,
5283 fold_convert (itype, fd->loops[i].n2));
5284 t = fold_build2 (MINUS_EXPR, itype, t,
5285 fold_convert (itype, fd->loops[i].n1));
5286 /* ?? We could probably use CEIL_DIV_EXPR instead of
5287 TRUNC_DIV_EXPR plus the manual adjustment above. But it is
5288 unclear whether that would generate the same code in the end,
5289 because generically we don't know that the values involved
5290 must be negative for GT. ?? */
5291 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5292 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5293 fold_build1 (NEGATE_EXPR, itype, t),
5294 fold_build1 (NEGATE_EXPR, itype,
5295 fold_convert (itype,
5296 fd->loops[i].step)));
5297 else
5298 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5299 fold_convert (itype, fd->loops[i].step));
5300 t = fold_convert (type, t);
5301 if (TREE_CODE (t) == INTEGER_CST)
5302 counts[i] = t;
5303 else
5305 counts[i] = create_tmp_reg (type, ".count");
5306 expand_omp_build_assign (gsi, counts[i], t);
5308 if (SSA_VAR_P (fd->loop.n2))
5310 if (i == 0)
5311 t = counts[0];
5312 else
5313 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5314 expand_omp_build_assign (gsi, fd->loop.n2, t);
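/* A worked instance of the count computation above: for

     for (i = 0; i < n; i += 4)

   cond_code is LT_EXPR, so adj = STEP - 1 = 3 and
   count = (3 + n - 0) / 4, i.e. the ceiling of n / 4 obtained with
   TRUNC_DIV_EXPR.  For a downward (GT_EXPR) loop with unsigned ITYPE
   both the adjusted difference and the step are negated first, so the
   truncating division still yields the iteration count.  */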
5320 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5321 T = V;
5322 V3 = N31 + (T % count3) * STEP3;
5323 T = T / count3;
5324 V2 = N21 + (T % count2) * STEP2;
5325 T = T / count2;
5326 V1 = N11 + T * STEP1;
5327 if this loop doesn't have an inner loop construct combined with it.
5328 If it does have an inner loop construct combined with it and the
5329 iteration count isn't known constant, store values from counts array
5330 into its _looptemp_ temporaries instead. */
5332 static void
5333 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5334 tree *counts, gimple inner_stmt, tree startvar)
5336 int i;
5337 if (gimple_omp_for_combined_p (fd->for_stmt))
5339 /* If fd->loop.n2 is constant, then no propagation of the counts
5340 is needed, they are constant. */
5341 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5342 return;
5344 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5345 ? gimple_omp_parallel_clauses (inner_stmt)
5346 : gimple_omp_for_clauses (inner_stmt);
5347 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5348 isn't supposed to be handled, as the inner loop doesn't
5349 use it. */
5350 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5351 gcc_assert (innerc);
5352 for (i = 0; i < fd->collapse; i++)
5354 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5355 OMP_CLAUSE__LOOPTEMP_);
5356 gcc_assert (innerc);
5357 if (i)
5359 tree tem = OMP_CLAUSE_DECL (innerc);
5360 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5361 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5362 false, GSI_CONTINUE_LINKING);
5363 gimple stmt = gimple_build_assign (tem, t);
5364 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5367 return;
5370 tree type = TREE_TYPE (fd->loop.v);
5371 tree tem = create_tmp_reg (type, ".tem");
5372 gimple stmt = gimple_build_assign (tem, startvar);
5373 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5375 for (i = fd->collapse - 1; i >= 0; i--)
5377 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5378 itype = vtype;
5379 if (POINTER_TYPE_P (vtype))
5380 itype = signed_type_for (vtype);
5381 if (i != 0)
5382 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5383 else
5384 t = tem;
5385 t = fold_convert (itype, t);
5386 t = fold_build2 (MULT_EXPR, itype, t,
5387 fold_convert (itype, fd->loops[i].step));
5388 if (POINTER_TYPE_P (vtype))
5389 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5390 else
5391 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5392 t = force_gimple_operand_gsi (gsi, t,
5393 DECL_P (fd->loops[i].v)
5394 && TREE_ADDRESSABLE (fd->loops[i].v),
5395 NULL_TREE, false,
5396 GSI_CONTINUE_LINKING);
5397 stmt = gimple_build_assign (fd->loops[i].v, t);
5398 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5399 if (i != 0)
5401 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5402 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5403 false, GSI_CONTINUE_LINKING);
5404 stmt = gimple_build_assign (tem, t);
5405 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
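/* A worked instance of the decomposition above: with collapse(3) and
   counts = {c1, c2, c3}, the flat iteration number T placed in TEM
   maps to

     V3 = N31 + (T % c3) * STEP3;  T = T / c3;
     V2 = N21 + (T % c2) * STEP2;  T = T / c2;
     V1 = N11 + T * STEP1;

   e.g. c2 = 3, c3 = 4, T = 17 gives the index triple (1, 1, 1):
   17 % 4 = 1, then T = 4; 4 % 3 = 1, then T = 1; finally V1 uses 1.  */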
5411 /* Helper function for expand_omp_for_*. Generate code like:
5412 L10:
5413 V3 += STEP3;
5414 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5415 L11:
5416 V3 = N31;
5417 V2 += STEP2;
5418 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5419 L12:
5420 V2 = N21;
5421 V1 += STEP1;
5422 goto BODY_BB; */
5424 static basic_block
5425 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5426 basic_block body_bb)
5428 basic_block last_bb, bb, collapse_bb = NULL;
5429 int i;
5430 gimple_stmt_iterator gsi;
5431 edge e;
5432 tree t;
5433 gimple stmt;
5435 last_bb = cont_bb;
5436 for (i = fd->collapse - 1; i >= 0; i--)
5438 tree vtype = TREE_TYPE (fd->loops[i].v);
5440 bb = create_empty_bb (last_bb);
5441 add_bb_to_loop (bb, last_bb->loop_father);
5442 gsi = gsi_start_bb (bb);
5444 if (i < fd->collapse - 1)
5446 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5447 e->probability = REG_BR_PROB_BASE / 8;
5449 t = fd->loops[i + 1].n1;
5450 t = force_gimple_operand_gsi (&gsi, t,
5451 DECL_P (fd->loops[i + 1].v)
5452 && TREE_ADDRESSABLE (fd->loops[i
5453 + 1].v),
5454 NULL_TREE, false,
5455 GSI_CONTINUE_LINKING);
5456 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5457 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5459 else
5460 collapse_bb = bb;
5462 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5464 if (POINTER_TYPE_P (vtype))
5465 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5466 else
5467 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5468 t = force_gimple_operand_gsi (&gsi, t,
5469 DECL_P (fd->loops[i].v)
5470 && TREE_ADDRESSABLE (fd->loops[i].v),
5471 NULL_TREE, false, GSI_CONTINUE_LINKING);
5472 stmt = gimple_build_assign (fd->loops[i].v, t);
5473 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5475 if (i > 0)
5477 t = fd->loops[i].n2;
5478 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5479 false, GSI_CONTINUE_LINKING);
5480 tree v = fd->loops[i].v;
5481 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5482 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5483 false, GSI_CONTINUE_LINKING);
5484 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5485 stmt = gimple_build_cond_empty (t);
5486 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5487 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5488 e->probability = REG_BR_PROB_BASE * 7 / 8;
5490 else
5491 make_edge (bb, body_bb, EDGE_FALLTHRU);
5492 last_bb = bb;
5495 return collapse_bb;
5499 /* A subroutine of expand_omp_for. Generate code for a parallel
5500 loop with any schedule. Given parameters:
5502 for (V = N1; V cond N2; V += STEP) BODY;
5504 where COND is "<" or ">", we generate pseudocode
5506 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5507 if (more) goto L0; else goto L3;
5509 V = istart0;
5510 iend = iend0;
5512 BODY;
5513 V += STEP;
5514 if (V cond iend) goto L1; else goto L2;
5516 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5519 If this is a combined omp parallel loop, instead of the call to
5520 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5521 If this is gimple_omp_for_combined_p loop, then instead of assigning
5522 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5523 inner GIMPLE_OMP_FOR and V += STEP; and
5524 if (V cond iend) goto L1; else goto L2; are removed.
5526 For collapsed loops, given parameters:
5527 collapse(3)
5528 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5529 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5530 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5531 BODY;
5533 we generate pseudocode
5535 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5536 if (cond3 is <)
5537 adj = STEP3 - 1;
5538 else
5539 adj = STEP3 + 1;
5540 count3 = (adj + N32 - N31) / STEP3;
5541 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5542 if (cond2 is <)
5543 adj = STEP2 - 1;
5544 else
5545 adj = STEP2 + 1;
5546 count2 = (adj + N22 - N21) / STEP2;
5547 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5548 if (cond1 is <)
5549 adj = STEP1 - 1;
5550 else
5551 adj = STEP1 + 1;
5552 count1 = (adj + N12 - N11) / STEP1;
5553 count = count1 * count2 * count3;
5554 goto Z1;
5556 count = 0;
5558 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5559 if (more) goto L0; else goto L3;
5561 V = istart0;
5562 T = V;
5563 V3 = N31 + (T % count3) * STEP3;
5564 T = T / count3;
5565 V2 = N21 + (T % count2) * STEP2;
5566 T = T / count2;
5567 V1 = N11 + T * STEP1;
5568 iend = iend0;
5570 BODY;
5571 V += 1;
5572 if (V < iend) goto L10; else goto L2;
5573 L10:
5574 V3 += STEP3;
5575 if (V3 cond3 N32) goto L1; else goto L11;
5576 L11:
5577 V3 = N31;
5578 V2 += STEP2;
5579 if (V2 cond2 N22) goto L1; else goto L12;
5580 L12:
5581 V2 = N21;
5582 V1 += STEP1;
5583 goto L1;
5585 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5590 static void
5591 expand_omp_for_generic (struct omp_region *region,
5592 struct omp_for_data *fd,
5593 enum built_in_function start_fn,
5594 enum built_in_function next_fn,
5595 gimple inner_stmt)
5597 tree type, istart0, iend0, iend;
5598 tree t, vmain, vback, bias = NULL_TREE;
5599 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5600 basic_block l2_bb = NULL, l3_bb = NULL;
5601 gimple_stmt_iterator gsi;
5602 gimple_assign assign_stmt;
5603 bool in_combined_parallel = is_combined_parallel (region);
5604 bool broken_loop = region->cont == NULL;
5605 edge e, ne;
5606 tree *counts = NULL;
5607 int i;
5609 gcc_assert (!broken_loop || !in_combined_parallel);
5610 gcc_assert (fd->iter_type == long_integer_type_node
5611 || !in_combined_parallel);
5613 type = TREE_TYPE (fd->loop.v);
5614 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5615 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5616 TREE_ADDRESSABLE (istart0) = 1;
5617 TREE_ADDRESSABLE (iend0) = 1;
5619 /* See if we need to bias by LLONG_MIN. */
5620 if (fd->iter_type == long_long_unsigned_type_node
5621 && TREE_CODE (type) == INTEGER_TYPE
5622 && !TYPE_UNSIGNED (type))
5624 tree n1, n2;
5626 if (fd->loop.cond_code == LT_EXPR)
5628 n1 = fd->loop.n1;
5629 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5631 else
5633 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5634 n2 = fd->loop.n1;
5636 if (TREE_CODE (n1) != INTEGER_CST
5637 || TREE_CODE (n2) != INTEGER_CST
5638 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5639 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5642 entry_bb = region->entry;
5643 cont_bb = region->cont;
5644 collapse_bb = NULL;
5645 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5646 gcc_assert (broken_loop
5647 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5648 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5649 l1_bb = single_succ (l0_bb);
5650 if (!broken_loop)
5652 l2_bb = create_empty_bb (cont_bb);
5653 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5654 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5656 else
5657 l2_bb = NULL;
5658 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5659 exit_bb = region->exit;
5661 gsi = gsi_last_bb (entry_bb);
5663 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5664 if (fd->collapse > 1)
5666 int first_zero_iter = -1;
5667 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5669 counts = XALLOCAVEC (tree, fd->collapse);
5670 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5671 zero_iter_bb, first_zero_iter,
5672 l2_dom_bb);
5674 if (zero_iter_bb)
5676 /* Some counts[i] vars might be uninitialized if
5677 some loop has zero iterations. But the body shouldn't
5678 be executed in that case, so just avoid uninit warnings. */
5679 for (i = first_zero_iter; i < fd->collapse; i++)
5680 if (SSA_VAR_P (counts[i]))
5681 TREE_NO_WARNING (counts[i]) = 1;
5682 gsi_prev (&gsi);
5683 e = split_block (entry_bb, gsi_stmt (gsi));
5684 entry_bb = e->dest;
5685 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5686 gsi = gsi_last_bb (entry_bb);
5687 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5688 get_immediate_dominator (CDI_DOMINATORS,
5689 zero_iter_bb));
5692 if (in_combined_parallel)
5694 /* In a combined parallel loop, emit a call to
5695 GOMP_loop_foo_next. */
5696 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5697 build_fold_addr_expr (istart0),
5698 build_fold_addr_expr (iend0));
5700 else
5702 tree t0, t1, t2, t3, t4;
5703 /* If this is not a combined parallel loop, emit a call to
5704 GOMP_loop_foo_start in ENTRY_BB. */
5705 t4 = build_fold_addr_expr (iend0);
5706 t3 = build_fold_addr_expr (istart0);
5707 t2 = fold_convert (fd->iter_type, fd->loop.step);
5708 t1 = fd->loop.n2;
5709 t0 = fd->loop.n1;
5710 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5712 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5713 OMP_CLAUSE__LOOPTEMP_);
5714 gcc_assert (innerc);
5715 t0 = OMP_CLAUSE_DECL (innerc);
5716 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5717 OMP_CLAUSE__LOOPTEMP_);
5718 gcc_assert (innerc);
5719 t1 = OMP_CLAUSE_DECL (innerc);
5721 if (POINTER_TYPE_P (TREE_TYPE (t0))
5722 && TYPE_PRECISION (TREE_TYPE (t0))
5723 != TYPE_PRECISION (fd->iter_type))
5725 /* Avoid casting pointers to integer of a different size. */
5726 tree itype = signed_type_for (type);
5727 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5728 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5730 else
5732 t1 = fold_convert (fd->iter_type, t1);
5733 t0 = fold_convert (fd->iter_type, t0);
5735 if (bias)
5737 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5738 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5740 if (fd->iter_type == long_integer_type_node)
5742 if (fd->chunk_size)
5744 t = fold_convert (fd->iter_type, fd->chunk_size);
5745 t = build_call_expr (builtin_decl_explicit (start_fn),
5746 6, t0, t1, t2, t, t3, t4);
5748 else
5749 t = build_call_expr (builtin_decl_explicit (start_fn),
5750 5, t0, t1, t2, t3, t4);
5752 else
5754 tree t5;
5755 tree c_bool_type;
5756 tree bfn_decl;
5758 /* The GOMP_loop_ull_*start functions have an additional boolean
5759 argument, true for < loops and false for > loops.
5760 In Fortran, the C bool type can be different from
5761 boolean_type_node. */
5762 bfn_decl = builtin_decl_explicit (start_fn);
5763 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5764 t5 = build_int_cst (c_bool_type,
5765 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5766 if (fd->chunk_size)
5768 tree bfn_decl = builtin_decl_explicit (start_fn);
5769 t = fold_convert (fd->iter_type, fd->chunk_size);
5770 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5772 else
5773 t = build_call_expr (builtin_decl_explicit (start_fn),
5774 6, t5, t0, t1, t2, t3, t4);
5777 if (TREE_TYPE (t) != boolean_type_node)
5778 t = fold_build2 (NE_EXPR, boolean_type_node,
5779 t, build_int_cst (TREE_TYPE (t), 0));
5780 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5781 true, GSI_SAME_STMT);
5782 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5784 /* Remove the GIMPLE_OMP_FOR statement. */
5785 gsi_remove (&gsi, true);
5787 /* Iteration setup for sequential loop goes in L0_BB. */
5788 tree startvar = fd->loop.v;
5789 tree endvar = NULL_TREE;
5791 if (gimple_omp_for_combined_p (fd->for_stmt))
5793 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5794 && gimple_omp_for_kind (inner_stmt)
5795 == GF_OMP_FOR_KIND_SIMD);
5796 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5797 OMP_CLAUSE__LOOPTEMP_);
5798 gcc_assert (innerc);
5799 startvar = OMP_CLAUSE_DECL (innerc);
5800 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5801 OMP_CLAUSE__LOOPTEMP_);
5802 gcc_assert (innerc);
5803 endvar = OMP_CLAUSE_DECL (innerc);
5806 gsi = gsi_start_bb (l0_bb);
5807 t = istart0;
5808 if (bias)
5809 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5810 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5811 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5812 t = fold_convert (TREE_TYPE (startvar), t);
5813 t = force_gimple_operand_gsi (&gsi, t,
5814 DECL_P (startvar)
5815 && TREE_ADDRESSABLE (startvar),
5816 NULL_TREE, false, GSI_CONTINUE_LINKING);
5817 assign_stmt = gimple_build_assign (startvar, t);
5818 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5820 t = iend0;
5821 if (bias)
5822 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5823 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5824 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5825 t = fold_convert (TREE_TYPE (startvar), t);
5826 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5827 false, GSI_CONTINUE_LINKING);
5828 if (endvar)
5830 assign_stmt = gimple_build_assign (endvar, iend);
5831 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5832 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5833 assign_stmt = gimple_build_assign (fd->loop.v, iend);
5834 else
5835 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, iend,
5836 NULL_TREE);
5837 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5839 if (fd->collapse > 1)
5840 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5842 if (!broken_loop)
5844 /* Code to control the increment and predicate for the sequential
5845 loop goes in the CONT_BB. */
5846 gsi = gsi_last_bb (cont_bb);
5847 gimple_omp_continue cont_stmt =
5848 as_a <gimple_omp_continue> (gsi_stmt (gsi));
5849 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
5850 vmain = gimple_omp_continue_control_use (cont_stmt);
5851 vback = gimple_omp_continue_control_def (cont_stmt);
5853 if (!gimple_omp_for_combined_p (fd->for_stmt))
5855 if (POINTER_TYPE_P (type))
5856 t = fold_build_pointer_plus (vmain, fd->loop.step);
5857 else
5858 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5859 t = force_gimple_operand_gsi (&gsi, t,
5860 DECL_P (vback)
5861 && TREE_ADDRESSABLE (vback),
5862 NULL_TREE, true, GSI_SAME_STMT);
5863 assign_stmt = gimple_build_assign (vback, t);
5864 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
5866 t = build2 (fd->loop.cond_code, boolean_type_node,
5867 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5868 iend);
5869 gimple_cond cond_stmt = gimple_build_cond_empty (t);
5870 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
5873 /* Remove GIMPLE_OMP_CONTINUE. */
5874 gsi_remove (&gsi, true);
5876 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5877 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5879 /* Emit code to get the next parallel iteration in L2_BB. */
5880 gsi = gsi_start_bb (l2_bb);
5882 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5883 build_fold_addr_expr (istart0),
5884 build_fold_addr_expr (iend0));
5885 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5886 false, GSI_CONTINUE_LINKING);
5887 if (TREE_TYPE (t) != boolean_type_node)
5888 t = fold_build2 (NE_EXPR, boolean_type_node,
5889 t, build_int_cst (TREE_TYPE (t), 0));
5890 gimple_cond cond_stmt = gimple_build_cond_empty (t);
5891 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
5894 /* Add the loop cleanup function. */
5895 gsi = gsi_last_bb (exit_bb);
5896 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5897 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5898 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5899 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5900 else
5901 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5902 gimple_call call_stmt = gimple_build_call (t, 0);
5903 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5904 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5905 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
5906 gsi_remove (&gsi, true);
5908 /* Connect the new blocks. */
5909 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5910 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5912 if (!broken_loop)
5914 gimple_seq phis;
5916 e = find_edge (cont_bb, l3_bb);
5917 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5919 phis = phi_nodes (l3_bb);
5920 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5922 gimple phi = gsi_stmt (gsi);
5923 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5924 PHI_ARG_DEF_FROM_EDGE (phi, e));
5926 remove_edge (e);
5928 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5929 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5930 e = find_edge (cont_bb, l1_bb);
5931 if (gimple_omp_for_combined_p (fd->for_stmt))
5933 remove_edge (e);
5934 e = NULL;
5936 else if (fd->collapse > 1)
5938 remove_edge (e);
5939 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5941 else
5942 e->flags = EDGE_TRUE_VALUE;
5943 if (e)
5945 e->probability = REG_BR_PROB_BASE * 7 / 8;
5946 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5948 else
5950 e = find_edge (cont_bb, l2_bb);
5951 e->flags = EDGE_FALLTHRU;
5953 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5955 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5956 recompute_dominator (CDI_DOMINATORS, l2_bb));
5957 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5958 recompute_dominator (CDI_DOMINATORS, l3_bb));
5959 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5960 recompute_dominator (CDI_DOMINATORS, l0_bb));
5961 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5962 recompute_dominator (CDI_DOMINATORS, l1_bb));
5964 struct loop *outer_loop = alloc_loop ();
5965 outer_loop->header = l0_bb;
5966 outer_loop->latch = l2_bb;
5967 add_loop (outer_loop, l0_bb->loop_father);
5969 if (!gimple_omp_for_combined_p (fd->for_stmt))
5971 struct loop *loop = alloc_loop ();
5972 loop->header = l1_bb;
5973 /* The loop may have multiple latches. */
5974 add_loop (loop, outer_loop);
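/* Editorial sketch (not GCC output): extracted into plain C, the code
   generated by expand_omp_for_generic drives libgomp roughly like this
   for a schedule(dynamic) loop over [n1, n2) with step 1, where BODY
   stands in for the loop body:

     long istart0, iend0;
     if (GOMP_loop_dynamic_start (n1, n2, 1, chunk, &istart0, &iend0))
       do
         for (long v = istart0; v < iend0; v++)
           BODY (v);
       while (GOMP_loop_dynamic_next (&istart0, &iend0));
     GOMP_loop_end ();   /* or GOMP_loop_end_nowait () for nowait  */
*/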
5980 /* A subroutine of expand_omp_for. Generate code for a parallel
5981 loop with static schedule and no specified chunk size. Given
5982 parameters:
5984 for (V = N1; V cond N2; V += STEP) BODY;
5986 where COND is "<" or ">", we generate pseudocode
5988 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5989 if (cond is <)
5990 adj = STEP - 1;
5991 else
5992 adj = STEP + 1;
5993 if ((__typeof (V)) -1 > 0 && cond is >)
5994 n = -(adj + N2 - N1) / -STEP;
5995 else
5996 n = (adj + N2 - N1) / STEP;
5997 q = n / nthreads;
5998 tt = n % nthreads;
5999 if (threadid < tt) goto L3; else goto L4;
6000 L3:
6001 tt = 0;
6002 q = q + 1;
6003 L4:
6004 s0 = q * threadid + tt;
6005 e0 = s0 + q;
6006 V = s0 * STEP + N1;
6007 if (s0 >= e0) goto L2; else goto L0;
6008 L0:
6009 e = e0 * STEP + N1;
6010 L1:
6011 BODY;
6012 V += STEP;
6013 if (V cond e) goto L1;
6014 L2:
6017 static void
6018 expand_omp_for_static_nochunk (struct omp_region *region,
6019 struct omp_for_data *fd,
6020 gimple inner_stmt)
6022 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
6023 tree type, itype, vmain, vback;
6024 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
6025 basic_block body_bb, cont_bb, collapse_bb = NULL;
6026 basic_block fin_bb;
6027 gimple_stmt_iterator gsi;
6028 edge ep;
6029 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6030 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6031 bool broken_loop = region->cont == NULL;
6032 tree *counts = NULL;
6033 tree n1, n2, step;
6035 itype = type = TREE_TYPE (fd->loop.v);
6036 if (POINTER_TYPE_P (type))
6037 itype = signed_type_for (type);
6039 entry_bb = region->entry;
6040 cont_bb = region->cont;
6041 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6042 fin_bb = BRANCH_EDGE (entry_bb)->dest;
6043 gcc_assert (broken_loop
6044 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
6045 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6046 body_bb = single_succ (seq_start_bb);
6047 if (!broken_loop)
6049 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6050 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6052 exit_bb = region->exit;
6054 /* Iteration space partitioning goes in ENTRY_BB. */
6055 gsi = gsi_last_bb (entry_bb);
6056 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6058 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6060 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6061 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6064 if (fd->collapse > 1)
6066 int first_zero_iter = -1;
6067 basic_block l2_dom_bb = NULL;
6069 counts = XALLOCAVEC (tree, fd->collapse);
6070 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6071 fin_bb, first_zero_iter,
6072 l2_dom_bb);
6073 t = NULL_TREE;
6075 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6076 t = integer_one_node;
6077 else
6078 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6079 fold_convert (type, fd->loop.n1),
6080 fold_convert (type, fd->loop.n2));
6081 if (fd->collapse == 1
6082 && TYPE_UNSIGNED (type)
6083 && (t == NULL_TREE || !integer_onep (t)))
6085 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6086 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6087 true, GSI_SAME_STMT);
6088 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6089 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6090 true, GSI_SAME_STMT);
6091 gimple_cond cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6092 NULL_TREE, NULL_TREE);
6093 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6094 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6095 expand_omp_regimplify_p, NULL, NULL)
6096 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6097 expand_omp_regimplify_p, NULL, NULL))
6099 gsi = gsi_for_stmt (cond_stmt);
6100 gimple_regimplify_operands (cond_stmt, &gsi);
6102 ep = split_block (entry_bb, cond_stmt);
6103 ep->flags = EDGE_TRUE_VALUE;
6104 entry_bb = ep->dest;
6105 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6106 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
6107 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
6108 if (gimple_in_ssa_p (cfun))
6110 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6111 for (gsi = gsi_start_phis (fin_bb);
6112 !gsi_end_p (gsi); gsi_next (&gsi))
6114 gimple phi = gsi_stmt (gsi);
6115 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6116 ep, UNKNOWN_LOCATION);
6119 gsi = gsi_last_bb (entry_bb);
6122 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6123 t = fold_convert (itype, t);
6124 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6125 true, GSI_SAME_STMT);
6127 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6128 t = fold_convert (itype, t);
6129 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6130 true, GSI_SAME_STMT);
6132 n1 = fd->loop.n1;
6133 n2 = fd->loop.n2;
6134 step = fd->loop.step;
6135 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6137 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6138 OMP_CLAUSE__LOOPTEMP_);
6139 gcc_assert (innerc);
6140 n1 = OMP_CLAUSE_DECL (innerc);
6141 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6142 OMP_CLAUSE__LOOPTEMP_);
6143 gcc_assert (innerc);
6144 n2 = OMP_CLAUSE_DECL (innerc);
6146 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6147 true, NULL_TREE, true, GSI_SAME_STMT);
6148 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6149 true, NULL_TREE, true, GSI_SAME_STMT);
6150 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6151 true, NULL_TREE, true, GSI_SAME_STMT);
6153 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6154 t = fold_build2 (PLUS_EXPR, itype, step, t);
6155 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6156 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6157 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6158 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6159 fold_build1 (NEGATE_EXPR, itype, t),
6160 fold_build1 (NEGATE_EXPR, itype, step));
6161 else
6162 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6163 t = fold_convert (itype, t);
6164 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6166 q = create_tmp_reg (itype, "q");
6167 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
6168 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6169 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
6171 tt = create_tmp_reg (itype, "tt");
6172 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
6173 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6174 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
6176 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
6177 gimple_cond cond_stmt = gimple_build_cond_empty (t);
6178 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6180 second_bb = split_block (entry_bb, cond_stmt)->dest;
6181 gsi = gsi_last_bb (second_bb);
6182 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6184 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
6185 GSI_SAME_STMT);
6186 gimple_assign assign_stmt =
6187 gimple_build_assign_with_ops (PLUS_EXPR, q, q,
6188 build_int_cst (itype, 1));
6189 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6191 third_bb = split_block (second_bb, assign_stmt)->dest;
6192 gsi = gsi_last_bb (third_bb);
6193 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6195 t = build2 (MULT_EXPR, itype, q, threadid);
6196 t = build2 (PLUS_EXPR, itype, t, tt);
6197 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6199 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6200 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6202 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6203 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6205 /* Remove the GIMPLE_OMP_FOR statement. */
6206 gsi_remove (&gsi, true);
6208 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6209 gsi = gsi_start_bb (seq_start_bb);
6211 tree startvar = fd->loop.v;
6212 tree endvar = NULL_TREE;
6214 if (gimple_omp_for_combined_p (fd->for_stmt))
6216 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6217 ? gimple_omp_parallel_clauses (inner_stmt)
6218 : gimple_omp_for_clauses (inner_stmt);
6219 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6220 gcc_assert (innerc);
6221 startvar = OMP_CLAUSE_DECL (innerc);
6222 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6223 OMP_CLAUSE__LOOPTEMP_);
6224 gcc_assert (innerc);
6225 endvar = OMP_CLAUSE_DECL (innerc);
6227 t = fold_convert (itype, s0);
6228 t = fold_build2 (MULT_EXPR, itype, t, step);
6229 if (POINTER_TYPE_P (type))
6230 t = fold_build_pointer_plus (n1, t);
6231 else
6232 t = fold_build2 (PLUS_EXPR, type, t, n1);
6233 t = fold_convert (TREE_TYPE (startvar), t);
6234 t = force_gimple_operand_gsi (&gsi, t,
6235 DECL_P (startvar)
6236 && TREE_ADDRESSABLE (startvar),
6237 NULL_TREE, false, GSI_CONTINUE_LINKING);
6238 assign_stmt = gimple_build_assign (startvar, t);
6239 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6241 t = fold_convert (itype, e0);
6242 t = fold_build2 (MULT_EXPR, itype, t, step);
6243 if (POINTER_TYPE_P (type))
6244 t = fold_build_pointer_plus (n1, t);
6245 else
6246 t = fold_build2 (PLUS_EXPR, type, t, n1);
6247 t = fold_convert (TREE_TYPE (startvar), t);
6248 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6249 false, GSI_CONTINUE_LINKING);
6250 if (endvar)
6252 assign_stmt = gimple_build_assign (endvar, e);
6253 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6254 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6255 assign_stmt = gimple_build_assign (fd->loop.v, e);
6256 else
6257 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6258 NULL_TREE);
6259 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6261 if (fd->collapse > 1)
6262 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6264 if (!broken_loop)
6266 /* The code controlling the sequential loop replaces the
6267 GIMPLE_OMP_CONTINUE. */
6268 gsi = gsi_last_bb (cont_bb);
6269 gimple_omp_continue cont_stmt =
6270 as_a <gimple_omp_continue> (gsi_stmt (gsi));
6271 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
6272 vmain = gimple_omp_continue_control_use (cont_stmt);
6273 vback = gimple_omp_continue_control_def (cont_stmt);
6275 if (!gimple_omp_for_combined_p (fd->for_stmt))
6277 if (POINTER_TYPE_P (type))
6278 t = fold_build_pointer_plus (vmain, step);
6279 else
6280 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6281 t = force_gimple_operand_gsi (&gsi, t,
6282 DECL_P (vback)
6283 && TREE_ADDRESSABLE (vback),
6284 NULL_TREE, true, GSI_SAME_STMT);
6285 assign_stmt = gimple_build_assign (vback, t);
6286 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6288 t = build2 (fd->loop.cond_code, boolean_type_node,
6289 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6290 ? t : vback, e);
6291 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6294 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6295 gsi_remove (&gsi, true);
6297 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6298 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6301 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6302 gsi = gsi_last_bb (exit_bb);
6303 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6305 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6306 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6308 gsi_remove (&gsi, true);
6310 /* Connect all the blocks. */
6311 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6312 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6313 ep = find_edge (entry_bb, second_bb);
6314 ep->flags = EDGE_TRUE_VALUE;
6315 ep->probability = REG_BR_PROB_BASE / 4;
6316 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6317 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6319 if (!broken_loop)
6321 ep = find_edge (cont_bb, body_bb);
6322 if (gimple_omp_for_combined_p (fd->for_stmt))
6324 remove_edge (ep);
6325 ep = NULL;
6327 else if (fd->collapse > 1)
6329 remove_edge (ep);
6330 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6332 else
6333 ep->flags = EDGE_TRUE_VALUE;
6334 find_edge (cont_bb, fin_bb)->flags
6335 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6338 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6339 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6340 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6342 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6343 recompute_dominator (CDI_DOMINATORS, body_bb));
6344 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6345 recompute_dominator (CDI_DOMINATORS, fin_bb));
6347 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6349 struct loop *loop = alloc_loop ();
6350 loop->header = body_bb;
6351 if (collapse_bb == NULL)
6352 loop->latch = cont_bb;
6353 add_loop (loop, body_bb->loop_father);
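/* Editorial sketch: the partitioning computed above is equivalent to
   this plain-C helper, where N is the iteration count, NTHREADS the
   team size and THREADID is 0-based; the first n % nthreads threads
   each get one extra iteration:

     static void
     static_nochunk_range (long n, long nthreads, long threadid,
                           long *s0, long *e0)
     {
       long q = n / nthreads;
       long tt = n % nthreads;
       if (threadid < tt)
         {
           tt = 0;
           q++;
         }
       *s0 = q * threadid + tt;
       *e0 = *s0 + q;
     }
*/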
6358 /* A subroutine of expand_omp_for. Generate code for a parallel
6359 loop with static schedule and a specified chunk size. Given
6360 parameters:
6362 for (V = N1; V cond N2; V += STEP) BODY;
6364 where COND is "<" or ">", we generate pseudocode
6366 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6367 if (cond is <)
6368 adj = STEP - 1;
6369 else
6370 adj = STEP + 1;
6371 if ((__typeof (V)) -1 > 0 && cond is >)
6372 n = -(adj + N2 - N1) / -STEP;
6373 else
6374 n = (adj + N2 - N1) / STEP;
6375 trip = 0;
6376 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6377 here so that V is defined
6378 if the loop is not entered
6379 L0:
6380 s0 = (trip * nthreads + threadid) * CHUNK;
6381 e0 = min(s0 + CHUNK, n);
6382 if (s0 < n) goto L1; else goto L4;
6383 L1:
6384 V = s0 * STEP + N1;
6385 e = e0 * STEP + N1;
6386 L2:
6387 BODY;
6388 V += STEP;
6389 if (V cond e) goto L2; else goto L3;
6390 L3:
6391 trip += 1;
6392 goto L0;
6393 L4:
6396 static void
6397 expand_omp_for_static_chunk (struct omp_region *region,
6398 struct omp_for_data *fd, gimple inner_stmt)
6400 tree n, s0, e0, e, t;
6401 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6402 tree type, itype, vmain, vback, vextra;
6403 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6404 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6405 gimple_stmt_iterator gsi;
6406 edge se;
6407 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6408 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6409 bool broken_loop = region->cont == NULL;
6410 tree *counts = NULL;
6411 tree n1, n2, step;
6413 itype = type = TREE_TYPE (fd->loop.v);
6414 if (POINTER_TYPE_P (type))
6415 itype = signed_type_for (type);
6417 entry_bb = region->entry;
6418 se = split_block (entry_bb, last_stmt (entry_bb));
6419 entry_bb = se->src;
6420 iter_part_bb = se->dest;
6421 cont_bb = region->cont;
6422 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6423 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6424 gcc_assert (broken_loop
6425 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6426 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6427 body_bb = single_succ (seq_start_bb);
6428 if (!broken_loop)
6430 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6431 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6432 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6434 exit_bb = region->exit;
6436 /* Trip and adjustment setup goes in ENTRY_BB. */
6437 gsi = gsi_last_bb (entry_bb);
6438 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6440 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6442 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6443 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6446 if (fd->collapse > 1)
6448 int first_zero_iter = -1;
6449 basic_block l2_dom_bb = NULL;
6451 counts = XALLOCAVEC (tree, fd->collapse);
6452 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6453 fin_bb, first_zero_iter,
6454 l2_dom_bb);
6455 t = NULL_TREE;
6457 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6458 t = integer_one_node;
6459 else
6460 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6461 fold_convert (type, fd->loop.n1),
6462 fold_convert (type, fd->loop.n2));
6463 if (fd->collapse == 1
6464 && TYPE_UNSIGNED (type)
6465 && (t == NULL_TREE || !integer_onep (t)))
6467 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6468 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6469 true, GSI_SAME_STMT);
6470 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6471 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6472 true, GSI_SAME_STMT);
6473 gimple_cond cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6474 NULL_TREE, NULL_TREE);
6475 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6476 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6477 expand_omp_regimplify_p, NULL, NULL)
6478 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6479 expand_omp_regimplify_p, NULL, NULL))
6481 gsi = gsi_for_stmt (cond_stmt);
6482 gimple_regimplify_operands (cond_stmt, &gsi);
6484 se = split_block (entry_bb, cond_stmt);
6485 se->flags = EDGE_TRUE_VALUE;
6486 entry_bb = se->dest;
6487 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6488 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6489 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6490 if (gimple_in_ssa_p (cfun))
6492 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6493 for (gsi = gsi_start_phis (fin_bb);
6494 !gsi_end_p (gsi); gsi_next (&gsi))
6496 gimple phi = gsi_stmt (gsi);
6497 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6498 se, UNKNOWN_LOCATION);
6501 gsi = gsi_last_bb (entry_bb);
6504 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6505 t = fold_convert (itype, t);
6506 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6507 true, GSI_SAME_STMT);
6509 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6510 t = fold_convert (itype, t);
6511 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6512 true, GSI_SAME_STMT);
6514 n1 = fd->loop.n1;
6515 n2 = fd->loop.n2;
6516 step = fd->loop.step;
6517 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6519 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6520 OMP_CLAUSE__LOOPTEMP_);
6521 gcc_assert (innerc);
6522 n1 = OMP_CLAUSE_DECL (innerc);
6523 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6524 OMP_CLAUSE__LOOPTEMP_);
6525 gcc_assert (innerc);
6526 n2 = OMP_CLAUSE_DECL (innerc);
6528 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6529 true, NULL_TREE, true, GSI_SAME_STMT);
6530 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6531 true, NULL_TREE, true, GSI_SAME_STMT);
6532 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6533 true, NULL_TREE, true, GSI_SAME_STMT);
6534 fd->chunk_size
6535 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
6536 true, NULL_TREE, true, GSI_SAME_STMT);
6538 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6539 t = fold_build2 (PLUS_EXPR, itype, step, t);
6540 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6541 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6542 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6543 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6544 fold_build1 (NEGATE_EXPR, itype, t),
6545 fold_build1 (NEGATE_EXPR, itype, step));
6546 else
6547 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6548 t = fold_convert (itype, t);
6549 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6550 true, GSI_SAME_STMT);
6552 trip_var = create_tmp_reg (itype, ".trip");
6553 if (gimple_in_ssa_p (cfun))
6555 trip_init = make_ssa_name (trip_var, NULL);
6556 trip_main = make_ssa_name (trip_var, NULL);
6557 trip_back = make_ssa_name (trip_var, NULL);
6559 else
6561 trip_init = trip_var;
6562 trip_main = trip_var;
6563 trip_back = trip_var;
6566 gimple_assign assign_stmt =
6567 gimple_build_assign (trip_init, build_int_cst (itype, 0));
6568 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6570 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6571 t = fold_build2 (MULT_EXPR, itype, t, step);
6572 if (POINTER_TYPE_P (type))
6573 t = fold_build_pointer_plus (n1, t);
6574 else
6575 t = fold_build2 (PLUS_EXPR, type, t, n1);
6576 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6577 true, GSI_SAME_STMT);
6579 /* Remove the GIMPLE_OMP_FOR. */
6580 gsi_remove (&gsi, true);
6582 /* Iteration space partitioning goes in ITER_PART_BB. */
6583 gsi = gsi_last_bb (iter_part_bb);
6585 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6586 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6587 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6588 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6589 false, GSI_CONTINUE_LINKING);
6591 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6592 t = fold_build2 (MIN_EXPR, itype, t, n);
6593 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6594 false, GSI_CONTINUE_LINKING);
6596 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6597 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6599 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6600 gsi = gsi_start_bb (seq_start_bb);
6602 tree startvar = fd->loop.v;
6603 tree endvar = NULL_TREE;
6605 if (gimple_omp_for_combined_p (fd->for_stmt))
6607 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6608 ? gimple_omp_parallel_clauses (inner_stmt)
6609 : gimple_omp_for_clauses (inner_stmt);
6610 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6611 gcc_assert (innerc);
6612 startvar = OMP_CLAUSE_DECL (innerc);
6613 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6614 OMP_CLAUSE__LOOPTEMP_);
6615 gcc_assert (innerc);
6616 endvar = OMP_CLAUSE_DECL (innerc);
6619 t = fold_convert (itype, s0);
6620 t = fold_build2 (MULT_EXPR, itype, t, step);
6621 if (POINTER_TYPE_P (type))
6622 t = fold_build_pointer_plus (n1, t);
6623 else
6624 t = fold_build2 (PLUS_EXPR, type, t, n1);
6625 t = fold_convert (TREE_TYPE (startvar), t);
6626 t = force_gimple_operand_gsi (&gsi, t,
6627 DECL_P (startvar)
6628 && TREE_ADDRESSABLE (startvar),
6629 NULL_TREE, false, GSI_CONTINUE_LINKING);
6630 assign_stmt = gimple_build_assign (startvar, t);
6631 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6633 t = fold_convert (itype, e0);
6634 t = fold_build2 (MULT_EXPR, itype, t, step);
6635 if (POINTER_TYPE_P (type))
6636 t = fold_build_pointer_plus (n1, t);
6637 else
6638 t = fold_build2 (PLUS_EXPR, type, t, n1);
6639 t = fold_convert (TREE_TYPE (startvar), t);
6640 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6641 false, GSI_CONTINUE_LINKING);
6642 if (endvar)
6644 assign_stmt = gimple_build_assign (endvar, e);
6645 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6646 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6647 assign_stmt = gimple_build_assign (fd->loop.v, e);
6648 else
6649 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6650 NULL_TREE);
6651 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6653 if (fd->collapse > 1)
6654 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6656 if (!broken_loop)
6658 /* The code controlling the sequential loop goes in CONT_BB,
6659 replacing the GIMPLE_OMP_CONTINUE. */
6660 gsi = gsi_last_bb (cont_bb);
6661 gimple_omp_continue cont_stmt =
6662 as_a <gimple_omp_continue> (gsi_stmt (gsi));
6663 vmain = gimple_omp_continue_control_use (cont_stmt);
6664 vback = gimple_omp_continue_control_def (cont_stmt);
6666 if (!gimple_omp_for_combined_p (fd->for_stmt))
6668 if (POINTER_TYPE_P (type))
6669 t = fold_build_pointer_plus (vmain, step);
6670 else
6671 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6672 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
6673 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6674 true, GSI_SAME_STMT);
6675 assign_stmt = gimple_build_assign (vback, t);
6676 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6678 t = build2 (fd->loop.cond_code, boolean_type_node,
6679 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6680 ? t : vback, e);
6681 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6684 /* Remove GIMPLE_OMP_CONTINUE. */
6685 gsi_remove (&gsi, true);
6687 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6688 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6690 /* Trip update code goes into TRIP_UPDATE_BB. */
6691 gsi = gsi_start_bb (trip_update_bb);
6693 t = build_int_cst (itype, 1);
6694 t = build2 (PLUS_EXPR, itype, trip_main, t);
6695 assign_stmt = gimple_build_assign (trip_back, t);
6696 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6699 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6700 gsi = gsi_last_bb (exit_bb);
6701 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6703 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6704 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6706 gsi_remove (&gsi, true);
6708 /* Connect the new blocks. */
6709 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6710 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6712 if (!broken_loop)
6714 se = find_edge (cont_bb, body_bb);
6715 if (gimple_omp_for_combined_p (fd->for_stmt))
6717 remove_edge (se);
6718 se = NULL;
6720 else if (fd->collapse > 1)
6722 remove_edge (se);
6723 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6725 else
6726 se->flags = EDGE_TRUE_VALUE;
6727 find_edge (cont_bb, trip_update_bb)->flags
6728 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6730 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6733 if (gimple_in_ssa_p (cfun))
6735 gimple_stmt_iterator psi;
6736 gimple phi;
6737 edge re, ene;
6738 edge_var_map *vm;
6739 size_t i;
6741 gcc_assert (fd->collapse == 1 && !broken_loop);
6743 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6744 remove arguments of the phi nodes in fin_bb. We need to create
6745 appropriate phi nodes in iter_part_bb instead. */
6746 se = single_pred_edge (fin_bb);
6747 re = single_succ_edge (trip_update_bb);
6748 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
6749 ene = single_succ_edge (entry_bb);
6751 psi = gsi_start_phis (fin_bb);
6752 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6753 gsi_next (&psi), ++i)
6755 gimple nphi;
6756 source_location locus;
6758 phi = gsi_stmt (psi);
6759 t = gimple_phi_result (phi);
6760 gcc_assert (t == redirect_edge_var_map_result (vm));
6761 nphi = create_phi_node (t, iter_part_bb);
6763 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6764 locus = gimple_phi_arg_location_from_edge (phi, se);
6766 /* A special case -- fd->loop.v is not yet computed in
6767 iter_part_bb, so we need to use vextra instead. */
6768 if (t == fd->loop.v)
6769 t = vextra;
6770 add_phi_arg (nphi, t, ene, locus);
6771 locus = redirect_edge_var_map_location (vm);
6772 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6774 gcc_assert (!gsi_end_p (psi) && i == head->length ());
6775 redirect_edge_var_map_clear (re);
6776 while (1)
6778 psi = gsi_start_phis (fin_bb);
6779 if (gsi_end_p (psi))
6780 break;
6781 remove_phi_node (&psi, false);
6784 /* Make phi node for trip. */
6785 phi = create_phi_node (trip_main, iter_part_bb);
6786 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6787 UNKNOWN_LOCATION);
6788 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6789 UNKNOWN_LOCATION);
6792 if (!broken_loop)
6793 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6794 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6795 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6796 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6797 recompute_dominator (CDI_DOMINATORS, fin_bb));
6798 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6799 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6800 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6801 recompute_dominator (CDI_DOMINATORS, body_bb));
6803 if (!broken_loop)
6805 struct loop *trip_loop = alloc_loop ();
6806 trip_loop->header = iter_part_bb;
6807 trip_loop->latch = trip_update_bb;
6808 add_loop (trip_loop, iter_part_bb->loop_father);
6810 if (!gimple_omp_for_combined_p (fd->for_stmt))
6812 struct loop *loop = alloc_loop ();
6813 loop->header = body_bb;
6814 if (collapse_bb == NULL)
6815 loop->latch = cont_bb;
6816 add_loop (loop, trip_loop);
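/* Editorial sketch: per thread, the trip loop built above enumerates
   the logical iterations [0, n) in round-robin chunks, equivalent to:

     for (long trip = 0; ; trip++)
       {
         long s0 = (trip * nthreads + threadid) * chunk;
         if (s0 >= n)
           break;
         long e0 = s0 + chunk < n ? s0 + chunk : n;  /* min (s0+chunk, n) */
         for (long i = s0; i < e0; i++)
           BODY (i);
       }
*/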
6821 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
6822 Given parameters:
6823 for (V = N1; V cond N2; V += STEP) BODY;
6825 where COND is "<" or ">" or "!=", we generate pseudocode
6827 for (ind_var = low; ind_var < high; ind_var++)
6828 {
6829 V = n1 + (ind_var * STEP)
6831 <BODY>
6832 }
6834 In the above pseudocode, low and high are function parameters of the
6835 child function. In the function below, we insert a temporary
6836 variable together with calls to two OMP functions that cannot appear
6837 in the body of a _Cilk_for (since OMP_FOR cannot be mixed
6838 with _Cilk_for). These calls are later replaced with low and high
6839 by the function that handles taskreg. */
6842 static void
6843 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
6845 bool broken_loop = region->cont == NULL;
6846 basic_block entry_bb = region->entry;
6847 basic_block cont_bb = region->cont;
6849 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6850 gcc_assert (broken_loop
6851 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6852 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6853 basic_block l1_bb, l2_bb;
6855 if (!broken_loop)
6857 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6858 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6859 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6860 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6862 else
6864 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6865 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6866 l2_bb = single_succ (l1_bb);
6868 basic_block exit_bb = region->exit;
6869 basic_block l2_dom_bb = NULL;
6871 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
6873 /* The statements below, up to the "tree high_val = ..." line, are pseudo
6874 statements used to pass information to expand_omp_taskreg.
6875 low_val and high_val will be replaced by the __low and __high
6876 parameters from the child function.
6878 The call_exprs part is a place-holder; it is mainly used
6879 to identify distinctly to the top-level part that this is
6880 where low and high should be put (reasoning given in the header
6881 comment). */
6883 tree child_fndecl
6884 = gimple_omp_parallel_child_fn (
6885 as_a <gimple_omp_parallel> (last_stmt (region->outer->entry)));
6886 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
6887 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
6889 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
6890 high_val = t;
6891 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
6892 low_val = t;
6894 gcc_assert (low_val && high_val);
6896 tree type = TREE_TYPE (low_val);
6897 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
6898 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6900 /* Not needed in SSA form right now. */
6901 gcc_assert (!gimple_in_ssa_p (cfun));
6902 if (l2_dom_bb == NULL)
6903 l2_dom_bb = l1_bb;
6905 tree n1 = low_val;
6906 tree n2 = high_val;
6908 gimple stmt = gimple_build_assign (ind_var, n1);
6910 /* Replace the GIMPLE_OMP_FOR statement. */
6911 gsi_replace (&gsi, stmt, true);
6913 if (!broken_loop)
6915 /* Code to control the increment goes in the CONT_BB. */
6916 gsi = gsi_last_bb (cont_bb);
6917 stmt = gsi_stmt (gsi);
6918 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6919 stmt = gimple_build_assign_with_ops (PLUS_EXPR, ind_var, ind_var,
6920 build_one_cst (type));
6922 /* Replace GIMPLE_OMP_CONTINUE. */
6923 gsi_replace (&gsi, stmt, true);
6926 /* Emit the condition in L1_BB. */
6927 gsi = gsi_after_labels (l1_bb);
6928 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
6929 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
6930 fd->loop.step);
6931 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
6932 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6933 fd->loop.n1, fold_convert (sizetype, t));
6934 else
6935 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6936 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
6937 t = fold_convert (TREE_TYPE (fd->loop.v), t);
6938 expand_omp_build_assign (&gsi, fd->loop.v, t);
6940 /* The condition is always '<' since the runtime will fill in the low
6941 and high values. */
6942 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
6943 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6945 /* Remove GIMPLE_OMP_RETURN. */
6946 gsi = gsi_last_bb (exit_bb);
6947 gsi_remove (&gsi, true);
6949 /* Connect the new blocks. */
6950 remove_edge (FALLTHRU_EDGE (entry_bb));
6952 edge e, ne;
6953 if (!broken_loop)
6955 remove_edge (BRANCH_EDGE (entry_bb));
6956 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6958 e = BRANCH_EDGE (l1_bb);
6959 ne = FALLTHRU_EDGE (l1_bb);
6960 e->flags = EDGE_TRUE_VALUE;
6962 else
6964 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6966 ne = single_succ_edge (l1_bb);
6967 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6970 ne->flags = EDGE_FALSE_VALUE;
6971 e->probability = REG_BR_PROB_BASE * 7 / 8;
6972 ne->probability = REG_BR_PROB_BASE / 8;
6974 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6975 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6976 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6978 if (!broken_loop)
6980 struct loop *loop = alloc_loop ();
6981 loop->header = l1_bb;
6982 loop->latch = cont_bb;
6983 add_loop (loop, l1_bb->loop_father);
6984 loop->safelen = INT_MAX;
6987 /* Pick the correct library function based on the precision of the
6988 induction variable type. */
6989 tree lib_fun = NULL_TREE;
6990 if (TYPE_PRECISION (type) == 32)
6991 lib_fun = cilk_for_32_fndecl;
6992 else if (TYPE_PRECISION (type) == 64)
6993 lib_fun = cilk_for_64_fndecl;
6994 else
6995 gcc_unreachable ();
6997 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
6999 /* WS_ARGS contains the library function flavor to call
7000 (__libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32), and the
7001 user-defined grain value. If the user does not define one, then zero
7002 is passed in by the parser. */
7003 vec_alloc (region->ws_args, 2);
7004 region->ws_args->quick_push (lib_fun);
7005 region->ws_args->quick_push (fd->chunk_size);
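/* Editorial note (an assumption about the libcilkrts ABI, not taken
   from this file): the function flavor pushed into WS_ARGS is expected
   to split [0, count) and repeatedly invoke the outlined child roughly
   as

     child_fn (data, low, high);

   which is why only the flavor, the grain and the __low/__high
   placeholders need to be communicated here.  */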
7008 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
7009 loop. Given parameters:
7011 for (V = N1; V cond N2; V += STEP) BODY;
7013 where COND is "<" or ">", we generate pseudocode
7015 V = N1;
7016 goto L1;
7017 L0:
7018 BODY;
7019 V += STEP;
7020 L1:
7021 if (V cond N2) goto L0; else goto L2;
7022 L2:
7024 For collapsed loops, given parameters:
7025 collapse(3)
7026 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
7027 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
7028 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
7029 BODY;
7031 we generate pseudocode
7033 if (cond3 is <)
7034 adj = STEP3 - 1;
7035 else
7036 adj = STEP3 + 1;
7037 count3 = (adj + N32 - N31) / STEP3;
7038 if (cond2 is <)
7039 adj = STEP2 - 1;
7040 else
7041 adj = STEP2 + 1;
7042 count2 = (adj + N22 - N21) / STEP2;
7043 if (cond1 is <)
7044 adj = STEP1 - 1;
7045 else
7046 adj = STEP1 + 1;
7047 count1 = (adj + N12 - N11) / STEP1;
7048 count = count1 * count2 * count3;
7049 V = 0;
7050 V1 = N11;
7051 V2 = N21;
7052 V3 = N31;
7053 goto L1;
7054 L0:
7055 BODY;
7056 V += 1;
7057 V3 += STEP3;
7058 V2 += (V3 cond3 N32) ? 0 : STEP2;
7059 V3 = (V3 cond3 N32) ? V3 : N31;
7060 V1 += (V2 cond2 N22) ? 0 : STEP1;
7061 V2 = (V2 cond2 N22) ? V2 : N21;
7062 L1:
7063 if (V < count) goto L0; else goto L2;
7064 L2:
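/* Editorial worked example: with collapse(2), V1 in [0, 2) and V2 in
   [0, 3), both with step 1, count1 = 2, count2 = 3 and count = 6; the
   single logical IV V runs over [0, 6) while V2 wraps back to N21 (0)
   each time it reaches N22 (3), at which point V1 advances.  */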
7068 static void
7069 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
7071 tree type, t;
7072 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7073 gimple_stmt_iterator gsi;
7074 gimple stmt;
7075 bool broken_loop = region->cont == NULL;
7076 edge e, ne;
7077 tree *counts = NULL;
7078 int i;
7079 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7080 OMP_CLAUSE_SAFELEN);
7081 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7082 OMP_CLAUSE__SIMDUID_);
7083 tree n1, n2;
7085 type = TREE_TYPE (fd->loop.v);
7086 entry_bb = region->entry;
7087 cont_bb = region->cont;
7088 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7089 gcc_assert (broken_loop
7090 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7091 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7092 if (!broken_loop)
7094 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7095 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7096 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7097 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7099 else
7101 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7102 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7103 l2_bb = single_succ (l1_bb);
7105 exit_bb = region->exit;
7106 l2_dom_bb = NULL;
7108 gsi = gsi_last_bb (entry_bb);
7110 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7111 /* Not needed in SSA form right now. */
7112 gcc_assert (!gimple_in_ssa_p (cfun));
7113 if (fd->collapse > 1)
7115 int first_zero_iter = -1;
7116 basic_block zero_iter_bb = l2_bb;
7118 counts = XALLOCAVEC (tree, fd->collapse);
7119 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7120 zero_iter_bb, first_zero_iter,
7121 l2_dom_bb);
7123 if (l2_dom_bb == NULL)
7124 l2_dom_bb = l1_bb;
7126 n1 = fd->loop.n1;
7127 n2 = fd->loop.n2;
7128 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7130 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7131 OMP_CLAUSE__LOOPTEMP_);
7132 gcc_assert (innerc);
7133 n1 = OMP_CLAUSE_DECL (innerc);
7134 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7135 OMP_CLAUSE__LOOPTEMP_);
7136 gcc_assert (innerc);
7137 n2 = OMP_CLAUSE_DECL (innerc);
7138 expand_omp_build_assign (&gsi, fd->loop.v,
7139 fold_convert (type, n1));
7140 if (fd->collapse > 1)
7142 gsi_prev (&gsi);
7143 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7144 gsi_next (&gsi);
7147 else
7149 expand_omp_build_assign (&gsi, fd->loop.v,
7150 fold_convert (type, fd->loop.n1));
7151 if (fd->collapse > 1)
7152 for (i = 0; i < fd->collapse; i++)
7154 tree itype = TREE_TYPE (fd->loops[i].v);
7155 if (POINTER_TYPE_P (itype))
7156 itype = signed_type_for (itype);
7157 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7158 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7162 /* Remove the GIMPLE_OMP_FOR statement. */
7163 gsi_remove (&gsi, true);
7165 if (!broken_loop)
7167 /* Code to control the increment goes in the CONT_BB. */
7168 gsi = gsi_last_bb (cont_bb);
7169 stmt = gsi_stmt (gsi);
7170 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7172 if (POINTER_TYPE_P (type))
7173 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7174 else
7175 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7176 expand_omp_build_assign (&gsi, fd->loop.v, t);
7178 if (fd->collapse > 1)
7180 i = fd->collapse - 1;
7181 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7183 t = fold_convert (sizetype, fd->loops[i].step);
7184 t = fold_build_pointer_plus (fd->loops[i].v, t);
7186 else
7188 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7189 fd->loops[i].step);
7190 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7191 fd->loops[i].v, t);
7193 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7195 for (i = fd->collapse - 1; i > 0; i--)
7197 tree itype = TREE_TYPE (fd->loops[i].v);
7198 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7199 if (POINTER_TYPE_P (itype2))
7200 itype2 = signed_type_for (itype2);
7201 t = build3 (COND_EXPR, itype2,
7202 build2 (fd->loops[i].cond_code, boolean_type_node,
7203 fd->loops[i].v,
7204 fold_convert (itype, fd->loops[i].n2)),
7205 build_int_cst (itype2, 0),
7206 fold_convert (itype2, fd->loops[i - 1].step));
7207 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7208 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7209 else
7210 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7211 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7213 t = build3 (COND_EXPR, itype,
7214 build2 (fd->loops[i].cond_code, boolean_type_node,
7215 fd->loops[i].v,
7216 fold_convert (itype, fd->loops[i].n2)),
7217 fd->loops[i].v,
7218 fold_convert (itype, fd->loops[i].n1));
7219 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7223 /* Remove GIMPLE_OMP_CONTINUE. */
7224 gsi_remove (&gsi, true);
7227 /* Emit the condition in L1_BB. */
7228 gsi = gsi_start_bb (l1_bb);
7230 t = fold_convert (type, n2);
7231 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7232 false, GSI_CONTINUE_LINKING);
7233 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7234 stmt = gimple_build_cond_empty (t);
7235 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7236 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
7237 NULL, NULL)
7238 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
7239 NULL, NULL))
7241 gsi = gsi_for_stmt (stmt);
7242 gimple_regimplify_operands (stmt, &gsi);
7245 /* Remove GIMPLE_OMP_RETURN. */
7246 gsi = gsi_last_bb (exit_bb);
7247 gsi_remove (&gsi, true);
7249 /* Connect the new blocks. */
7250 remove_edge (FALLTHRU_EDGE (entry_bb));
7252 if (!broken_loop)
7254 remove_edge (BRANCH_EDGE (entry_bb));
7255 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7257 e = BRANCH_EDGE (l1_bb);
7258 ne = FALLTHRU_EDGE (l1_bb);
7259 e->flags = EDGE_TRUE_VALUE;
7261 else
7263 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7265 ne = single_succ_edge (l1_bb);
7266 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7269 ne->flags = EDGE_FALSE_VALUE;
7270 e->probability = REG_BR_PROB_BASE * 7 / 8;
7271 ne->probability = REG_BR_PROB_BASE / 8;
7273 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7274 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7275 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7277 if (!broken_loop)
7279 struct loop *loop = alloc_loop ();
7280 loop->header = l1_bb;
7281 loop->latch = cont_bb;
7282 add_loop (loop, l1_bb->loop_father);
7283 if (safelen == NULL_TREE)
7284 loop->safelen = INT_MAX;
7285 else
7287 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7288 if (TREE_CODE (safelen) != INTEGER_CST)
7289 loop->safelen = 0;
7290 else if (!tree_fits_uhwi_p (safelen)
7291 || tree_to_uhwi (safelen) > INT_MAX)
7292 loop->safelen = INT_MAX;
7293 else
7294 loop->safelen = tree_to_uhwi (safelen);
7295 if (loop->safelen == 1)
7296 loop->safelen = 0;
7298 if (simduid)
7300 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7301 cfun->has_simduid_loops = true;
7303 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7304 the loop. */
7305 if ((flag_tree_loop_vectorize
7306 || (!global_options_set.x_flag_tree_loop_vectorize
7307 && !global_options_set.x_flag_tree_vectorize))
7308 && flag_tree_loop_optimize
7309 && loop->safelen > 1)
7311 loop->force_vectorize = true;
7312 cfun->has_force_vectorize_loops = true;
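/* Editorial example: for

     #pragma omp simd safelen(8)
     for (i = 0; i < n; i++) a[i] += a[i - 8];

   the loop above ends up with loop->safelen == 8, telling the
   vectorizer it may execute up to 8 iterations as if concurrent
   without further dependence checking.  */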
7318 /* Expand the OpenMP loop defined by REGION. */
7320 static void
7321 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7323 struct omp_for_data fd;
7324 struct omp_for_data_loop *loops;
7326 loops
7327 = (struct omp_for_data_loop *)
7328 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7329 * sizeof (struct omp_for_data_loop));
7330 extract_omp_for_data (as_a <gimple_omp_for> (last_stmt (region->entry)),
7331 &fd, loops);
7332 region->sched_kind = fd.sched_kind;
7334 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7335 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7336 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7337 if (region->cont)
7339 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7340 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7341 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7343 else
7344 /* If there isn't a continue then this is a degenerate case where
7345 the introduction of abnormal edges during lowering will prevent
7346 original loops from being detected. Fix that up. */
7347 loops_state_set (LOOPS_NEED_FIXUP);
7349 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7350 expand_omp_simd (region, &fd);
7351 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7352 expand_cilk_for (region, &fd);
7353 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7354 && !fd.have_ordered)
7356 if (fd.chunk_size == NULL)
7357 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7358 else
7359 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7361 else
7363 int fn_index, start_ix, next_ix;
7365 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7366 == GF_OMP_FOR_KIND_FOR);
7367 if (fd.chunk_size == NULL
7368 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7369 fd.chunk_size = integer_zero_node;
7370 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
7371 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7372 ? 3 : fd.sched_kind;
7373 fn_index += fd.have_ordered * 4;
7374 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7375 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
7376 if (fd.iter_type == long_long_unsigned_type_node)
7378 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7379 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7380 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7381 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7383 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7384 (enum built_in_function) next_ix, inner_stmt);
7387 if (gimple_in_ssa_p (cfun))
7388 update_ssa (TODO_update_ssa_only_virtuals);
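/* Editorial example of the index arithmetic above (assuming the builtin
   enumeration follows the schedule kinds in order, which the offset
   computation relies on): schedule(dynamic) gives fn_index 1 and thus
   the GOMP_loop_dynamic_start/next pair; schedule(guided) with an
   ordered clause gives fn_index 2 + 4 and the GOMP_loop_ordered_guided_*
   pair; an unsigned long long iteration type shifts the same offsets
   into the GOMP_loop_ull_* range.  */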
7392 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7394 v = GOMP_sections_start (n);
7395 L0:
7396 switch (v)
7397 {
7398 case 0:
7399 goto L2;
7400 case 1:
7401 section 1;
7402 goto L1;
7403 case 2:
7404 ...
7405 case n:
7406 ...
7407 default:
7408 abort ();
7409 }
7410 L1:
7411 v = GOMP_sections_next ();
7412 goto L0;
7413 L2:
7414 reduction;
7416 If this is a combined parallel sections, replace the call to
7417 GOMP_sections_start with a call to GOMP_sections_next. */
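/* Editorial example: for

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   the switch above gets cases 1 and 2 calling foo () and bar (), with
   case 0 jumping past the construct once GOMP_sections_next returns 0. */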
7419 static void
7420 expand_omp_sections (struct omp_region *region)
7422 tree t, u, vin = NULL, vmain, vnext, l2;
7423 unsigned len;
7424 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7425 gimple_stmt_iterator si, switch_si;
7426 gimple sections_stmt, stmt;
7427 gimple_omp_continue cont;
7428 edge_iterator ei;
7429 edge e;
7430 struct omp_region *inner;
7431 unsigned i, casei;
7432 bool exit_reachable = region->cont != NULL;
7434 gcc_assert (region->exit != NULL);
7435 entry_bb = region->entry;
7436 l0_bb = single_succ (entry_bb);
7437 l1_bb = region->cont;
7438 l2_bb = region->exit;
7439 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7440 l2 = gimple_block_label (l2_bb);
7441 else
7443 /* This can happen if there are reductions. */
7444 len = EDGE_COUNT (l0_bb->succs);
7445 gcc_assert (len > 0);
7446 e = EDGE_SUCC (l0_bb, len - 1);
7447 si = gsi_last_bb (e->dest);
7448 l2 = NULL_TREE;
7449 if (gsi_end_p (si)
7450 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7451 l2 = gimple_block_label (e->dest);
7452 else
7453 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7455 si = gsi_last_bb (e->dest);
7456 if (gsi_end_p (si)
7457 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7459 l2 = gimple_block_label (e->dest);
7460 break;
7464 if (exit_reachable)
7465 default_bb = create_empty_bb (l1_bb->prev_bb);
7466 else
7467 default_bb = create_empty_bb (l0_bb);
7469 /* We will build a switch() with enough cases for all the
7470 GIMPLE_OMP_SECTION regions, a '0' case taken when there is no more work,
7471 and a default case to abort if something goes wrong. */
7472 len = EDGE_COUNT (l0_bb->succs);
7474 /* Use vec::quick_push on label_vec throughout, since we know the size
7475 in advance. */
7476 auto_vec<tree> label_vec (len);
7478 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7479 GIMPLE_OMP_SECTIONS statement. */
7480 si = gsi_last_bb (entry_bb);
7481 sections_stmt = gsi_stmt (si);
7482 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7483 vin = gimple_omp_sections_control (sections_stmt);
7484 if (!is_combined_parallel (region))
7486 /* If we are not inside a combined parallel+sections region,
7487 call GOMP_sections_start. */
7488 t = build_int_cst (unsigned_type_node, len - 1);
7489 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7490 stmt = gimple_build_call (u, 1, t);
7492 else
7494 /* Otherwise, call GOMP_sections_next. */
7495 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7496 stmt = gimple_build_call (u, 0);
7498 gimple_call_set_lhs (stmt, vin);
7499 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7500 gsi_remove (&si, true);
7502 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7503 L0_BB. */
7504 switch_si = gsi_last_bb (l0_bb);
7505 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7506 if (exit_reachable)
7508 cont = as_a <gimple_omp_continue> (last_stmt (l1_bb));
7509 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7510 vmain = gimple_omp_continue_control_use (cont);
7511 vnext = gimple_omp_continue_control_def (cont);
7513 else
7515 vmain = vin;
7516 vnext = NULL_TREE;
7519 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7520 label_vec.quick_push (t);
7521 i = 1;
7523 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7524 for (inner = region->inner, casei = 1;
7525 inner;
7526 inner = inner->next, i++, casei++)
7528 basic_block s_entry_bb, s_exit_bb;
7530 /* Skip optional reduction region. */
7531 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7533 --i;
7534 --casei;
7535 continue;
7538 s_entry_bb = inner->entry;
7539 s_exit_bb = inner->exit;
7541 t = gimple_block_label (s_entry_bb);
7542 u = build_int_cst (unsigned_type_node, casei);
7543 u = build_case_label (u, NULL, t);
7544 label_vec.quick_push (u);
7546 si = gsi_last_bb (s_entry_bb);
7547 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7548 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7549 gsi_remove (&si, true);
7550 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7552 if (s_exit_bb == NULL)
7553 continue;
7555 si = gsi_last_bb (s_exit_bb);
7556 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7557 gsi_remove (&si, true);
7559 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7562 /* Error handling code goes in DEFAULT_BB. */
7563 t = gimple_block_label (default_bb);
7564 u = build_case_label (NULL, NULL, t);
7565 make_edge (l0_bb, default_bb, 0);
7566 add_bb_to_loop (default_bb, current_loops->tree_root);
7568 stmt = gimple_build_switch (vmain, u, label_vec);
7569 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7570 gsi_remove (&switch_si, true);
7572 si = gsi_start_bb (default_bb);
7573 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7574 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7576 if (exit_reachable)
7578 tree bfn_decl;
7580 /* Code to get the next section goes in L1_BB. */
7581 si = gsi_last_bb (l1_bb);
7582 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7584 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7585 stmt = gimple_build_call (bfn_decl, 0);
7586 gimple_call_set_lhs (stmt, vnext);
7587 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7588 gsi_remove (&si, true);
7590 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7593 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7594 si = gsi_last_bb (l2_bb);
7595 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7596 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7597 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7598 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7599 else
7600 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7601 stmt = gimple_build_call (t, 0);
7602 if (gimple_omp_return_lhs (gsi_stmt (si)))
7603 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7604 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7605 gsi_remove (&si, true);
7607 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7611 /* Expand code for an OpenMP single directive. We've already expanded
7612 much of the code, here we simply place the GOMP_barrier call. */
7614 static void
7615 expand_omp_single (struct omp_region *region)
7617 basic_block entry_bb, exit_bb;
7618 gimple_stmt_iterator si;
7620 entry_bb = region->entry;
7621 exit_bb = region->exit;
7623 si = gsi_last_bb (entry_bb);
7624 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7625 gsi_remove (&si, true);
7626 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7628 si = gsi_last_bb (exit_bb);
7629 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7631 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7632 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7634 gsi_remove (&si, true);
7635 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7639 /* Generic expansion for OpenMP synchronization directives: master,
7640 ordered and critical. All we need to do here is remove the entry
7641 and exit markers for REGION. */
7643 static void
7644 expand_omp_synch (struct omp_region *region)
7646 basic_block entry_bb, exit_bb;
7647 gimple_stmt_iterator si;
7649 entry_bb = region->entry;
7650 exit_bb = region->exit;
7652 si = gsi_last_bb (entry_bb);
7653 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7654 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7655 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7656 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7657 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7658 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7659 gsi_remove (&si, true);
7660 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7662 if (exit_bb)
7664 si = gsi_last_bb (exit_bb);
7665 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7666 gsi_remove (&si, true);
7667 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7671 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7672 operation as a normal volatile load. */
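/* Illustrative sketch only (hypothetical variables v and p): for a
   4-byte int, a directive like

       #pragma omp atomic read
       v = *p;

   is expected to become, in effect,

       v = __atomic_load_4 (p, MEMMODEL_RELAXED);

   or MEMMODEL_SEQ_CST when the seq_cst clause is present.  */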
7674 static bool
7675 expand_omp_atomic_load (basic_block load_bb, tree addr,
7676 tree loaded_val, int index)
7678 enum built_in_function tmpbase;
7679 gimple_stmt_iterator gsi;
7680 basic_block store_bb;
7681 location_t loc;
7682 gimple stmt;
7683 tree decl, call, type, itype;
7685 gsi = gsi_last_bb (load_bb);
7686 stmt = gsi_stmt (gsi);
7687 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7688 loc = gimple_location (stmt);
7690 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7691 is smaller than word size, then expand_atomic_load assumes that the load
7692 is atomic. We could avoid the builtin entirely in this case. */
7694 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7695 decl = builtin_decl_explicit (tmpbase);
7696 if (decl == NULL_TREE)
7697 return false;
7699 type = TREE_TYPE (loaded_val);
7700 itype = TREE_TYPE (TREE_TYPE (decl));
7702 call = build_call_expr_loc (loc, decl, 2, addr,
7703 build_int_cst (NULL,
7704 gimple_omp_atomic_seq_cst_p (stmt)
7705 ? MEMMODEL_SEQ_CST
7706 : MEMMODEL_RELAXED));
7707 if (!useless_type_conversion_p (type, itype))
7708 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7709 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7711 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7712 gsi_remove (&gsi, true);
7714 store_bb = single_succ (load_bb);
7715 gsi = gsi_last_bb (store_bb);
7716 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7717 gsi_remove (&gsi, true);
7719 if (gimple_in_ssa_p (cfun))
7720 update_ssa (TODO_update_ssa_no_phi);
7722 return true;
7725 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7726 operation as a normal volatile store. */
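/* Illustrative sketch only (hypothetical variables p, v and v2): for a
   4-byte int,

       #pragma omp atomic write
       *p = v;

   is expected to become __atomic_store_4 (p, v, MEMMODEL_RELAXED),
   while a capture form whose update is a plain write,

       #pragma omp atomic capture
       { v2 = *p; *p = v; }

   instead becomes v2 = __atomic_exchange_4 (p, v, MEMMODEL_RELAXED).  */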
7728 static bool
7729 expand_omp_atomic_store (basic_block load_bb, tree addr,
7730 tree loaded_val, tree stored_val, int index)
7732 enum built_in_function tmpbase;
7733 gimple_stmt_iterator gsi;
7734 basic_block store_bb = single_succ (load_bb);
7735 location_t loc;
7736 gimple stmt;
7737 tree decl, call, type, itype;
7738 enum machine_mode imode;
7739 bool exchange;
7741 gsi = gsi_last_bb (load_bb);
7742 stmt = gsi_stmt (gsi);
7743 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7745 /* If the load value is needed, then this isn't a store but an exchange. */
7746 exchange = gimple_omp_atomic_need_value_p (stmt);
7748 gsi = gsi_last_bb (store_bb);
7749 stmt = gsi_stmt (gsi);
7750 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7751 loc = gimple_location (stmt);
7753 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7754 is smaller than word size, then expand_atomic_store assumes that the store
7755 is atomic. We could avoid the builtin entirely in this case. */
7757 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7758 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7759 decl = builtin_decl_explicit (tmpbase);
7760 if (decl == NULL_TREE)
7761 return false;
7763 type = TREE_TYPE (stored_val);
7765 /* Dig out the type of the function's second argument. */
7766 itype = TREE_TYPE (decl);
7767 itype = TYPE_ARG_TYPES (itype);
7768 itype = TREE_CHAIN (itype);
7769 itype = TREE_VALUE (itype);
7770 imode = TYPE_MODE (itype);
7772 if (exchange && !can_atomic_exchange_p (imode, true))
7773 return false;
7775 if (!useless_type_conversion_p (itype, type))
7776 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7777 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7778 build_int_cst (NULL,
7779 gimple_omp_atomic_seq_cst_p (stmt)
7780 ? MEMMODEL_SEQ_CST
7781 : MEMMODEL_RELAXED));
7782 if (exchange)
7784 if (!useless_type_conversion_p (type, itype))
7785 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7786 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7789 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7790 gsi_remove (&gsi, true);
7792 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7793 gsi = gsi_last_bb (load_bb);
7794 gsi_remove (&gsi, true);
7796 if (gimple_in_ssa_p (cfun))
7797 update_ssa (TODO_update_ssa_no_phi);
7799 return true;
7802 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7803 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7804 size of the data type, and thus usable to find the index of the builtin
7805 decl. Returns false if the expression is not of the proper form. */
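/* A hypothetical instance of the shape this routine matches: with a
   4-byte int x,

       #pragma omp atomic
       x += 1;

   maps to the builtin call

       __atomic_fetch_add_4 (&x, 1, MEMMODEL_RELAXED);

   with the ADD_FETCH variant chosen instead when the *new* value is
   needed.  */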
7807 static bool
7808 expand_omp_atomic_fetch_op (basic_block load_bb,
7809 tree addr, tree loaded_val,
7810 tree stored_val, int index)
7812 enum built_in_function oldbase, newbase, tmpbase;
7813 tree decl, itype, call;
7814 tree lhs, rhs;
7815 basic_block store_bb = single_succ (load_bb);
7816 gimple_stmt_iterator gsi;
7817 gimple stmt;
7818 location_t loc;
7819 enum tree_code code;
7820 bool need_old, need_new;
7821 enum machine_mode imode;
7822 bool seq_cst;
7824 /* We expect to find the following sequences:
7826 load_bb:
7827 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7829 store_bb:
7830 val = tmp OP something; (or: something OP tmp)
7831 GIMPLE_OMP_ATOMIC_STORE (val)
7833 ???FIXME: Allow a more flexible sequence.
7834 Perhaps use data flow to pick the statements.
7838 gsi = gsi_after_labels (store_bb);
7839 stmt = gsi_stmt (gsi);
7840 loc = gimple_location (stmt);
7841 if (!is_gimple_assign (stmt))
7842 return false;
7843 gsi_next (&gsi);
7844 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7845 return false;
7846 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7847 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7848 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7849 gcc_checking_assert (!need_old || !need_new);
7851 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7852 return false;
7854 /* Check for one of the supported fetch-op operations. */
7855 code = gimple_assign_rhs_code (stmt);
7856 switch (code)
7858 case PLUS_EXPR:
7859 case POINTER_PLUS_EXPR:
7860 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7861 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7862 break;
7863 case MINUS_EXPR:
7864 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7865 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7866 break;
7867 case BIT_AND_EXPR:
7868 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7869 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7870 break;
7871 case BIT_IOR_EXPR:
7872 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7873 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7874 break;
7875 case BIT_XOR_EXPR:
7876 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7877 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7878 break;
7879 default:
7880 return false;
7883 /* Make sure the expression is of the proper form. */
7884 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7885 rhs = gimple_assign_rhs2 (stmt);
7886 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7887 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7888 rhs = gimple_assign_rhs1 (stmt);
7889 else
7890 return false;
7892 tmpbase = ((enum built_in_function)
7893 ((need_new ? newbase : oldbase) + index + 1));
7894 decl = builtin_decl_explicit (tmpbase);
7895 if (decl == NULL_TREE)
7896 return false;
7897 itype = TREE_TYPE (TREE_TYPE (decl));
7898 imode = TYPE_MODE (itype);
7900 /* We could test all of the various optabs involved, but the fact of the
7901 matter is that (with the exception of i486 vs i586 and xadd) all targets
7902 that support any atomic operation optab also implement compare-and-swap.
7903 Let optabs.c take care of expanding any compare-and-swap loop. */
7904 if (!can_compare_and_swap_p (imode, true))
7905 return false;
7907 gsi = gsi_last_bb (load_bb);
7908 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7910 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7911 It only requires that the operation happen atomically. Thus we can
7912 use the RELAXED memory model. */
7913 call = build_call_expr_loc (loc, decl, 3, addr,
7914 fold_convert_loc (loc, itype, rhs),
7915 build_int_cst (NULL,
7916 seq_cst ? MEMMODEL_SEQ_CST
7917 : MEMMODEL_RELAXED));
7919 if (need_old || need_new)
7921 lhs = need_old ? loaded_val : stored_val;
7922 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7923 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7925 else
7926 call = fold_convert_loc (loc, void_type_node, call);
7927 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7928 gsi_remove (&gsi, true);
7930 gsi = gsi_last_bb (store_bb);
7931 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7932 gsi_remove (&gsi, true);
7933 gsi = gsi_last_bb (store_bb);
7934 gsi_remove (&gsi, true);
7936 if (gimple_in_ssa_p (cfun))
7937 update_ssa (TODO_update_ssa_no_phi);
7939 return true;
7942 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7944 oldval = *addr;
7945 repeat:
7946 newval = rhs; // with oldval replacing *addr in rhs
7947 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
7948 if (oldval != newval)
7949 goto repeat;
7951 INDEX is log2 of the size of the data type, and thus usable to find the
7952 index of the builtin decl. */
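/* For illustration (a sketch assuming a 4-byte float and hypothetical
   names; not code from this pass), the loop amounts to

       union { float f; unsigned u; } expected, desired;
       unsigned observed;
       expected.f = *addr;
       for (;;)
         {
           desired.f = expected.f + x;   // the user's update expression
           observed = __sync_val_compare_and_swap ((unsigned *) addr,
                                                   expected.u, desired.u);
           if (observed == expected.u)
             break;                      // swap succeeded
           expected.u = observed;        // lost a race; retry
         }

   comparing the integer views so that NaNs and -0.0 cannot make the
   loop spin forever.  */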
7954 static bool
7955 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7956 tree addr, tree loaded_val, tree stored_val,
7957 int index)
7959 tree loadedi, storedi, initial, new_storedi, old_vali;
7960 tree type, itype, cmpxchg, iaddr;
7961 gimple_stmt_iterator si;
7962 basic_block loop_header = single_succ (load_bb);
7963 gimple phi, stmt;
7964 edge e;
7965 enum built_in_function fncode;
7967 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7968 order to use the RELAXED memory model effectively. */
7969 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7970 + index + 1);
7971 cmpxchg = builtin_decl_explicit (fncode);
7972 if (cmpxchg == NULL_TREE)
7973 return false;
7974 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7975 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7977 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7978 return false;
7980 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7981 si = gsi_last_bb (load_bb);
7982 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7984 /* For floating-point values, we'll need to view-convert them to integers
7985 so that we can perform the atomic compare and swap. Simplify the
7986 following code by always setting up the "i"ntegral variables. */
7987 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7989 tree iaddr_val;
7991 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7992 true), NULL);
7993 iaddr_val
7994 = force_gimple_operand_gsi (&si,
7995 fold_convert (TREE_TYPE (iaddr), addr),
7996 false, NULL_TREE, true, GSI_SAME_STMT);
7997 stmt = gimple_build_assign (iaddr, iaddr_val);
7998 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7999 loadedi = create_tmp_var (itype, NULL);
8000 if (gimple_in_ssa_p (cfun))
8001 loadedi = make_ssa_name (loadedi, NULL);
8003 else
8005 iaddr = addr;
8006 loadedi = loaded_val;
8009 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
8010 tree loaddecl = builtin_decl_explicit (fncode);
8011 if (loaddecl)
8012 initial
8013 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
8014 build_call_expr (loaddecl, 2, iaddr,
8015 build_int_cst (NULL_TREE,
8016 MEMMODEL_RELAXED)));
8017 else
8018 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
8019 build_int_cst (TREE_TYPE (iaddr), 0));
8021 initial
8022 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
8023 GSI_SAME_STMT);
8025 /* Move the value to the LOADEDI temporary. */
8026 if (gimple_in_ssa_p (cfun))
8028 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
8029 phi = create_phi_node (loadedi, loop_header);
8030 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
8031 initial);
8033 else
8034 gsi_insert_before (&si,
8035 gimple_build_assign (loadedi, initial),
8036 GSI_SAME_STMT);
8037 if (loadedi != loaded_val)
8039 gimple_stmt_iterator gsi2;
8040 tree x;
8042 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
8043 gsi2 = gsi_start_bb (loop_header);
8044 if (gimple_in_ssa_p (cfun))
8046 gimple stmt;
8047 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8048 true, GSI_SAME_STMT);
8049 stmt = gimple_build_assign (loaded_val, x);
8050 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
8052 else
8054 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
8055 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8056 true, GSI_SAME_STMT);
8059 gsi_remove (&si, true);
8061 si = gsi_last_bb (store_bb);
8062 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8064 if (iaddr == addr)
8065 storedi = stored_val;
8066 else
8067 storedi =
8068 force_gimple_operand_gsi (&si,
8069 build1 (VIEW_CONVERT_EXPR, itype,
8070 stored_val), true, NULL_TREE, true,
8071 GSI_SAME_STMT);
8073 /* Build the compare&swap statement. */
8074 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
8075 new_storedi = force_gimple_operand_gsi (&si,
8076 fold_convert (TREE_TYPE (loadedi),
8077 new_storedi),
8078 true, NULL_TREE,
8079 true, GSI_SAME_STMT);
8081 if (gimple_in_ssa_p (cfun))
8082 old_vali = loadedi;
8083 else
8085 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
8086 stmt = gimple_build_assign (old_vali, loadedi);
8087 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8089 stmt = gimple_build_assign (loadedi, new_storedi);
8090 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8093 /* Note that we always perform the comparison as an integer, even for
8094 floating point. This allows the atomic operation to properly
8095 succeed even with NaNs and -0.0. */
8096 stmt = gimple_build_cond_empty
8097 (build2 (NE_EXPR, boolean_type_node,
8098 new_storedi, old_vali));
8099 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8101 /* Update cfg. */
8102 e = single_succ_edge (store_bb);
8103 e->flags &= ~EDGE_FALLTHRU;
8104 e->flags |= EDGE_FALSE_VALUE;
8106 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
8108 /* Copy the new value to loadedi (we already did that before the condition
8109 if we are not in SSA). */
8110 if (gimple_in_ssa_p (cfun))
8112 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
8113 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
8116 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8117 gsi_remove (&si, true);
8119 struct loop *loop = alloc_loop ();
8120 loop->header = loop_header;
8121 loop->latch = store_bb;
8122 add_loop (loop, loop_header->loop_father);
8124 if (gimple_in_ssa_p (cfun))
8125 update_ssa (TODO_update_ssa_no_phi);
8127 return true;
8130 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8132 GOMP_atomic_start ();
8133 *addr = rhs;
8134 GOMP_atomic_end ();
8136 The result is not globally atomic, but works so long as all parallel
8137 references are within #pragma omp atomic directives. According to
8138 responses received from omp@openmp.org, this appears to be within spec.
8139 That makes sense, since it is how several other compilers handle
8140 this situation as well.
8141 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8142 expanding. STORED_VAL is the operand of the matching
8143 GIMPLE_OMP_ATOMIC_STORE.
8145 We replace
8146 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8147 loaded_val = *addr;
8149 and replace
8150 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8151 *addr = stored_val;
8154 static bool
8155 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
8156 tree addr, tree loaded_val, tree stored_val)
8158 gimple_stmt_iterator si;
8159 gimple stmt;
8160 tree t;
8162 si = gsi_last_bb (load_bb);
8163 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8165 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
8166 t = build_call_expr (t, 0);
8167 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8169 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
8170 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8171 gsi_remove (&si, true);
8173 si = gsi_last_bb (store_bb);
8174 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8176 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
8177 stored_val);
8178 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8180 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
8181 t = build_call_expr (t, 0);
8182 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8183 gsi_remove (&si, true);
8185 if (gimple_in_ssa_p (cfun))
8186 update_ssa (TODO_update_ssa_no_phi);
8187 return true;
8190 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
8191 using expand_omp_atomic_fetch_op. If that fails, we try
8192 expand_omp_atomic_pipeline, and if that fails too, the
8193 ultimate fallback is wrapping the operation in a mutex
8194 (expand_omp_atomic_mutex). REGION is the atomic region built
8195 by build_omp_regions_1 (). */
8197 static void
8198 expand_omp_atomic (struct omp_region *region)
8200 basic_block load_bb = region->entry, store_bb = region->exit;
8201 gimple_omp_atomic_load load =
8202 as_a <gimple_omp_atomic_load> (last_stmt (load_bb));
8203 gimple_omp_atomic_store store =
8204 as_a <gimple_omp_atomic_store> (last_stmt (store_bb));
8205 tree loaded_val = gimple_omp_atomic_load_lhs (load);
8206 tree addr = gimple_omp_atomic_load_rhs (load);
8207 tree stored_val = gimple_omp_atomic_store_val (store);
8208 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8209 HOST_WIDE_INT index;
8211 /* Make sure the type is one of the supported sizes. */
8212 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
8213 index = exact_log2 (index);
8214 if (index >= 0 && index <= 4)
8216 unsigned int align = TYPE_ALIGN_UNIT (type);
8218 /* __sync builtins require strict data alignment. */
8219 if (exact_log2 (align) >= index)
8221 /* Atomic load. */
8222 if (loaded_val == stored_val
8223 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8224 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8225 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8226 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
8227 return;
8229 /* Atomic store. */
8230 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8231 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8232 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8233 && store_bb == single_succ (load_bb)
8234 && first_stmt (store_bb) == store
8235 && expand_omp_atomic_store (load_bb, addr, loaded_val,
8236 stored_val, index))
8237 return;
8239 /* When possible, use specialized atomic update functions. */
8240 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
8241 && store_bb == single_succ (load_bb)
8242 && expand_omp_atomic_fetch_op (load_bb, addr,
8243 loaded_val, stored_val, index))
8244 return;
8246 /* If we don't have specialized __sync builtins, try and implement
8247 as a compare and swap loop. */
8248 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
8249 loaded_val, stored_val, index))
8250 return;
8254 /* The ultimate fallback is wrapping the operation in a mutex. */
8255 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
8259 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
8261 static void
8262 expand_omp_target (struct omp_region *region)
8264 basic_block entry_bb, exit_bb, new_bb;
8265 struct function *child_cfun = NULL;
8266 tree child_fn = NULL_TREE, block, t;
8267 gimple_stmt_iterator gsi;
8268 gimple entry_stmt, stmt;
8269 edge e;
8271 entry_stmt = last_stmt (region->entry);
8272 new_bb = region->entry;
8273 int kind = gimple_omp_target_kind (entry_stmt);
8274 if (kind == GF_OMP_TARGET_KIND_REGION)
8276 child_fn = gimple_omp_target_child_fn (entry_stmt);
8277 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
8280 entry_bb = region->entry;
8281 exit_bb = region->exit;
8283 if (kind == GF_OMP_TARGET_KIND_REGION)
8285 unsigned srcidx, dstidx, num;
8287 /* If the target region needs data sent from the parent
8288 function, then the very first statement (except possible
8289 tree profile counter updates) of the target region body
8290 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8291 &.OMP_DATA_O is passed as an argument to the child function,
8292 we need to replace it with the argument as seen by the child
8293 function.
8295 In most cases, this will end up being the identity assignment
8296 .OMP_DATA_I = .OMP_DATA_I. However, if the target body had
8297 a function call that has been inlined, the original PARM_DECL
8298 .OMP_DATA_I may have been converted into a different local
8299 variable. In which case, we need to keep the assignment. */
8300 if (gimple_omp_target_data_arg (entry_stmt))
8302 basic_block entry_succ_bb = single_succ (entry_bb);
8303 gimple_stmt_iterator gsi;
8304 tree arg;
8305 gimple tgtcopy_stmt = NULL;
8306 tree sender
8307 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
8309 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
8311 gcc_assert (!gsi_end_p (gsi));
8312 stmt = gsi_stmt (gsi);
8313 if (gimple_code (stmt) != GIMPLE_ASSIGN)
8314 continue;
8316 if (gimple_num_ops (stmt) == 2)
8318 tree arg = gimple_assign_rhs1 (stmt);
8320 /* We're ignoring the subcode because we're
8321 effectively doing a STRIP_NOPS. */
8323 if (TREE_CODE (arg) == ADDR_EXPR
8324 && TREE_OPERAND (arg, 0) == sender)
8326 tgtcopy_stmt = stmt;
8327 break;
8332 gcc_assert (tgtcopy_stmt != NULL);
8333 arg = DECL_ARGUMENTS (child_fn);
8335 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
8336 gsi_remove (&gsi, true);
8339 /* Declare local variables needed in CHILD_CFUN. */
8340 block = DECL_INITIAL (child_fn);
8341 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
8342 /* The gimplifier could record temporaries in target block
8343 rather than in containing function's local_decls chain,
8344 which would mean cgraph missed finalizing them. Do it now. */
8345 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
8346 if (TREE_CODE (t) == VAR_DECL
8347 && TREE_STATIC (t)
8348 && !DECL_EXTERNAL (t))
8349 varpool_node::finalize_decl (t);
8350 DECL_SAVED_TREE (child_fn) = NULL;
8351 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8352 gimple_set_body (child_fn, NULL);
8353 TREE_USED (block) = 1;
8355 /* Reset DECL_CONTEXT on function arguments. */
8356 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
8357 DECL_CONTEXT (t) = child_fn;
8359 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
8360 so that it can be moved to the child function. */
8361 gsi = gsi_last_bb (entry_bb);
8362 stmt = gsi_stmt (gsi);
8363 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
8364 && gimple_omp_target_kind (stmt)
8365 == GF_OMP_TARGET_KIND_REGION);
8366 gsi_remove (&gsi, true);
8367 e = split_block (entry_bb, stmt);
8368 entry_bb = e->dest;
8369 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8371 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8372 if (exit_bb)
8374 gsi = gsi_last_bb (exit_bb);
8375 gcc_assert (!gsi_end_p (gsi)
8376 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
8377 stmt = gimple_build_return (NULL);
8378 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8379 gsi_remove (&gsi, true);
8382 /* Move the target region into CHILD_CFUN. */
8384 block = gimple_block (entry_stmt);
8386 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8387 if (exit_bb)
8388 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8389 /* When the OMP expansion process cannot guarantee an up-to-date
8390 loop tree, arrange for the child function to fix up its loops. */
8391 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8392 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8394 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8395 num = vec_safe_length (child_cfun->local_decls);
8396 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8398 t = (*child_cfun->local_decls)[srcidx];
8399 if (DECL_CONTEXT (t) == cfun->decl)
8400 continue;
8401 if (srcidx != dstidx)
8402 (*child_cfun->local_decls)[dstidx] = t;
8403 dstidx++;
8405 if (dstidx != num)
8406 vec_safe_truncate (child_cfun->local_decls, dstidx);
8408 /* Inform the callgraph about the new function. */
8409 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8410 cgraph_node::add_new_function (child_fn, true);
8412 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8413 fixed in a following pass. */
8414 push_cfun (child_cfun);
8415 cgraph_edge::rebuild_edges ();
8417 /* Some EH regions might become dead, see PR34608. If
8418 pass_cleanup_cfg isn't the first pass to happen with the
8419 new child, these dead EH edges might cause problems.
8420 Clean them up now. */
8421 if (flag_exceptions)
8423 basic_block bb;
8424 bool changed = false;
8426 FOR_EACH_BB_FN (bb, cfun)
8427 changed |= gimple_purge_dead_eh_edges (bb);
8428 if (changed)
8429 cleanup_tree_cfg ();
8431 pop_cfun ();
8434 /* Emit a library call to launch the target region, or do data
8435 transfers. */
8436 tree t1, t2, t3, t4, device, cond, c, clauses;
8437 enum built_in_function start_ix;
8438 location_t clause_loc;
8440 clauses = gimple_omp_target_clauses (entry_stmt);
8442 if (kind == GF_OMP_TARGET_KIND_REGION)
8443 start_ix = BUILT_IN_GOMP_TARGET;
8444 else if (kind == GF_OMP_TARGET_KIND_DATA)
8445 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8446 else
8447 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
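  /* Illustratively (a sketch, not the exact generated GIMPLE):
     "#pragma omp target" around a statement ends up as roughly

	 GOMP_target (device, child_fn, &__OPENMP_TARGET__,
		      mapnum, hostaddrs, sizes, kinds);

     while "#pragma omp target data" and "#pragma omp target update"
     call GOMP_target_data and GOMP_target_update with the same
     mapping arguments but no child function pointer.  */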
8449 /* By default, the value of DEVICE is -1 (let the runtime library
8450 choose) and there is no conditional. */
8451 cond = NULL_TREE;
8452 device = build_int_cst (integer_type_node, -1);
8454 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8455 if (c)
8456 cond = OMP_CLAUSE_IF_EXPR (c);
8458 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8459 if (c)
8461 device = OMP_CLAUSE_DEVICE_ID (c);
8462 clause_loc = OMP_CLAUSE_LOCATION (c);
8464 else
8465 clause_loc = gimple_location (entry_stmt);
8467 /* Ensure 'device' is of the correct type. */
8468 device = fold_convert_loc (clause_loc, integer_type_node, device);
8470 /* If we found the clause 'if (cond)', build
8471 (cond ? device : -2). */
8472 if (cond)
8474 cond = gimple_boolify (cond);
8476 basic_block cond_bb, then_bb, else_bb;
8477 edge e;
8478 tree tmp_var;
8480 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
8481 if (kind != GF_OMP_TARGET_KIND_REGION)
8483 gsi = gsi_last_bb (new_bb);
8484 gsi_prev (&gsi);
8485 e = split_block (new_bb, gsi_stmt (gsi));
8487 else
8488 e = split_block (new_bb, NULL);
8489 cond_bb = e->src;
8490 new_bb = e->dest;
8491 remove_edge (e);
8493 then_bb = create_empty_bb (cond_bb);
8494 else_bb = create_empty_bb (then_bb);
8495 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8496 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8498 stmt = gimple_build_cond_empty (cond);
8499 gsi = gsi_last_bb (cond_bb);
8500 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8502 gsi = gsi_start_bb (then_bb);
8503 stmt = gimple_build_assign (tmp_var, device);
8504 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8506 gsi = gsi_start_bb (else_bb);
8507 stmt = gimple_build_assign (tmp_var,
8508 build_int_cst (integer_type_node, -2));
8509 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8511 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8512 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8513 add_bb_to_loop (then_bb, cond_bb->loop_father);
8514 add_bb_to_loop (else_bb, cond_bb->loop_father);
8515 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8516 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8518 device = tmp_var;
8521 gsi = gsi_last_bb (new_bb);
8522 t = gimple_omp_target_data_arg (entry_stmt);
8523 if (t == NULL)
8525 t1 = size_zero_node;
8526 t2 = build_zero_cst (ptr_type_node);
8527 t3 = t2;
8528 t4 = t2;
8530 else
8532 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8533 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8534 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8535 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8536 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8539 gimple g;
8540 /* FIXME: This will be the address of the
8541 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8542 symbol, as soon as the linker plugin is able to create it for us. */
8543 tree openmp_target = build_zero_cst (ptr_type_node);
8544 if (kind == GF_OMP_TARGET_KIND_REGION)
8546 tree fnaddr = build_fold_addr_expr (child_fn);
8547 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8548 device, fnaddr, openmp_target, t1, t2, t3, t4);
8550 else
8551 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8552 device, openmp_target, t1, t2, t3, t4);
8553 gimple_set_location (g, gimple_location (entry_stmt));
8554 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8555 if (kind != GF_OMP_TARGET_KIND_REGION)
8557 g = gsi_stmt (gsi);
8558 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8559 gsi_remove (&gsi, true);
8561 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8563 gsi = gsi_last_bb (region->exit);
8564 g = gsi_stmt (gsi);
8565 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8566 gsi_remove (&gsi, true);
8571 /* Expand the parallel region tree rooted at REGION. Expansion
8572 proceeds in depth-first order. Innermost regions are expanded
8573 first. This way, parallel regions that require a new function to
8574 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8575 internal dependencies in their body. */
8577 static void
8578 expand_omp (struct omp_region *region)
8580 while (region)
8582 location_t saved_location;
8583 gimple inner_stmt = NULL;
8585 /* First, determine whether this is a combined parallel+workshare
8586 region. */
8587 if (region->type == GIMPLE_OMP_PARALLEL)
8588 determine_parallel_type (region);
8590 if (region->type == GIMPLE_OMP_FOR
8591 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8592 inner_stmt = last_stmt (region->inner->entry);
8594 if (region->inner)
8595 expand_omp (region->inner);
8597 saved_location = input_location;
8598 if (gimple_has_location (last_stmt (region->entry)))
8599 input_location = gimple_location (last_stmt (region->entry));
8601 switch (region->type)
8603 case GIMPLE_OMP_PARALLEL:
8604 case GIMPLE_OMP_TASK:
8605 expand_omp_taskreg (region);
8606 break;
8608 case GIMPLE_OMP_FOR:
8609 expand_omp_for (region, inner_stmt);
8610 break;
8612 case GIMPLE_OMP_SECTIONS:
8613 expand_omp_sections (region);
8614 break;
8616 case GIMPLE_OMP_SECTION:
8617 /* Individual omp sections are handled together with their
8618 parent GIMPLE_OMP_SECTIONS region. */
8619 break;
8621 case GIMPLE_OMP_SINGLE:
8622 expand_omp_single (region);
8623 break;
8625 case GIMPLE_OMP_MASTER:
8626 case GIMPLE_OMP_TASKGROUP:
8627 case GIMPLE_OMP_ORDERED:
8628 case GIMPLE_OMP_CRITICAL:
8629 case GIMPLE_OMP_TEAMS:
8630 expand_omp_synch (region);
8631 break;
8633 case GIMPLE_OMP_ATOMIC_LOAD:
8634 expand_omp_atomic (region);
8635 break;
8637 case GIMPLE_OMP_TARGET:
8638 expand_omp_target (region);
8639 break;
8641 default:
8642 gcc_unreachable ();
8645 input_location = saved_location;
8646 region = region->next;
8651 /* Helper for build_omp_regions. Scan the dominator tree starting at
8652 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8653 true, the function ends once a single tree is built (otherwise, a whole
8654 forest of OMP constructs may be built). */
8656 static void
8657 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8658 bool single_tree)
8660 gimple_stmt_iterator gsi;
8661 gimple stmt;
8662 basic_block son;
8664 gsi = gsi_last_bb (bb);
8665 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8667 struct omp_region *region;
8668 enum gimple_code code;
8670 stmt = gsi_stmt (gsi);
8671 code = gimple_code (stmt);
8672 if (code == GIMPLE_OMP_RETURN)
8674 /* STMT is the return point out of region PARENT. Mark it
8675 as the exit point and make PARENT the immediately
8676 enclosing region. */
8677 gcc_assert (parent);
8678 region = parent;
8679 region->exit = bb;
8680 parent = parent->outer;
8682 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8684 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8685 GIMPLE_OMP_RETURN, but matches
8686 GIMPLE_OMP_ATOMIC_LOAD. */
8687 gcc_assert (parent);
8688 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8689 region = parent;
8690 region->exit = bb;
8691 parent = parent->outer;
8694 else if (code == GIMPLE_OMP_CONTINUE)
8696 gcc_assert (parent);
8697 parent->cont = bb;
8699 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8701 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8702 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8705 else if (code == GIMPLE_OMP_TARGET
8706 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8707 new_omp_region (bb, code, parent);
8708 else
8710 /* Otherwise, this directive becomes the parent for a new
8711 region. */
8712 region = new_omp_region (bb, code, parent);
8713 parent = region;
8717 if (single_tree && !parent)
8718 return;
8720 for (son = first_dom_son (CDI_DOMINATORS, bb);
8721 son;
8722 son = next_dom_son (CDI_DOMINATORS, son))
8723 build_omp_regions_1 (son, parent, single_tree);
8726 /* Builds the tree of OMP regions rooted at ROOT, storing it to
8727 root_omp_region. */
8729 static void
8730 build_omp_regions_root (basic_block root)
8732 gcc_assert (root_omp_region == NULL);
8733 build_omp_regions_1 (root, NULL, true);
8734 gcc_assert (root_omp_region != NULL);
8737 /* Expands omp construct (and its subconstructs) starting in HEAD. */
8739 void
8740 omp_expand_local (basic_block head)
8742 build_omp_regions_root (head);
8743 if (dump_file && (dump_flags & TDF_DETAILS))
8745 fprintf (dump_file, "\nOMP region tree\n\n");
8746 dump_omp_region (dump_file, root_omp_region, 0);
8747 fprintf (dump_file, "\n");
8750 remove_exit_barriers (root_omp_region);
8751 expand_omp (root_omp_region);
8753 free_omp_regions ();
8756 /* Scan the CFG and build a tree of OMP regions, storing the root
8757 in root_omp_region. */
8759 static void
8760 build_omp_regions (void)
8762 gcc_assert (root_omp_region == NULL);
8763 calculate_dominance_info (CDI_DOMINATORS);
8764 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8767 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8769 static unsigned int
8770 execute_expand_omp (void)
8772 build_omp_regions ();
8774 if (!root_omp_region)
8775 return 0;
8777 if (dump_file)
8779 fprintf (dump_file, "\nOMP region tree\n\n");
8780 dump_omp_region (dump_file, root_omp_region, 0);
8781 fprintf (dump_file, "\n");
8784 remove_exit_barriers (root_omp_region);
8786 expand_omp (root_omp_region);
8788 cleanup_tree_cfg ();
8790 free_omp_regions ();
8792 return 0;
8795 /* OMP expansion -- the default pass, run before creation of SSA form. */
8797 namespace {
8799 const pass_data pass_data_expand_omp =
8801 GIMPLE_PASS, /* type */
8802 "ompexp", /* name */
8803 OPTGROUP_NONE, /* optinfo_flags */
8804 TV_NONE, /* tv_id */
8805 PROP_gimple_any, /* properties_required */
8806 0, /* properties_provided */
8807 0, /* properties_destroyed */
8808 0, /* todo_flags_start */
8809 0, /* todo_flags_finish */
8812 class pass_expand_omp : public gimple_opt_pass
8814 public:
8815 pass_expand_omp (gcc::context *ctxt)
8816 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8819 /* opt_pass methods: */
8820 virtual bool gate (function *)
8822 return ((flag_openmp != 0 || flag_openmp_simd != 0
8823 || flag_cilkplus != 0) && !seen_error ());
8826 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8828 }; // class pass_expand_omp
8830 } // anon namespace
8832 gimple_opt_pass *
8833 make_pass_expand_omp (gcc::context *ctxt)
8835 return new pass_expand_omp (ctxt);
8838 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8840 /* If CTX is a worksharing context inside a cancellable parallel
8841 region and it isn't nowait, add an lhs to its GIMPLE_OMP_RETURN
8842 and a conditional branch to the parallel's cancel_label to handle
8843 cancellation in the implicit barrier. */
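/* Sketch of the effect (illustrative; the exact end-of-construct call
   depends on the construct): the barrier-bearing return becomes

       bool cancelled = GOMP_sections_end_cancel ();
       if (cancelled) goto cancel_label; else goto fallthru_label;

   so a cancelled parallel skips the remainder of the construct.  */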
8845 static void
8846 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8848 gimple omp_return = gimple_seq_last_stmt (*body);
8849 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8850 if (gimple_omp_return_nowait_p (omp_return))
8851 return;
8852 if (ctx->outer
8853 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8854 && ctx->outer->cancellable)
8856 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8857 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8858 tree lhs = create_tmp_var (c_bool_type, NULL);
8859 gimple_omp_return_set_lhs (omp_return, lhs);
8860 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8861 gimple g = gimple_build_cond (NE_EXPR, lhs,
8862 fold_convert (c_bool_type,
8863 boolean_false_node),
8864 ctx->outer->cancel_label, fallthru_label);
8865 gimple_seq_add_stmt (body, g);
8866 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8870 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8871 CTX is the enclosing OMP context for the current statement. */
8873 static void
8874 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8876 tree block, control;
8877 gimple_stmt_iterator tgsi;
8878 gimple stmt, t;
8879 gimple_bind new_stmt, bind;
8880 gimple_seq ilist, dlist, olist, new_body;
8882 stmt = gsi_stmt (*gsi_p);
8884 push_gimplify_context ();
8886 dlist = NULL;
8887 ilist = NULL;
8888 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8889 &ilist, &dlist, ctx, NULL);
8891 new_body = gimple_omp_body (stmt);
8892 gimple_omp_set_body (stmt, NULL);
8893 tgsi = gsi_start (new_body);
8894 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8896 omp_context *sctx;
8897 gimple sec_start;
8899 sec_start = gsi_stmt (tgsi);
8900 sctx = maybe_lookup_ctx (sec_start);
8901 gcc_assert (sctx);
8903 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8904 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8905 GSI_CONTINUE_LINKING);
8906 gimple_omp_set_body (sec_start, NULL);
8908 if (gsi_one_before_end_p (tgsi))
8910 gimple_seq l = NULL;
8911 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8912 &l, ctx);
8913 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8914 gimple_omp_section_set_last (sec_start);
8917 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8918 GSI_CONTINUE_LINKING);
8921 block = make_node (BLOCK);
8922 bind = gimple_build_bind (NULL, new_body, block);
8924 olist = NULL;
8925 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8927 block = make_node (BLOCK);
8928 new_stmt = gimple_build_bind (NULL, NULL, block);
8929 gsi_replace (gsi_p, new_stmt, true);
8931 pop_gimplify_context (new_stmt);
8932 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8933 BLOCK_VARS (block) = gimple_bind_vars (bind);
8934 if (BLOCK_VARS (block))
8935 TREE_USED (block) = 1;
8937 new_body = NULL;
8938 gimple_seq_add_seq (&new_body, ilist);
8939 gimple_seq_add_stmt (&new_body, stmt);
8940 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8941 gimple_seq_add_stmt (&new_body, bind);
8943 control = create_tmp_var (unsigned_type_node, ".section");
8944 t = gimple_build_omp_continue (control, control);
8945 gimple_omp_sections_set_control (stmt, control);
8946 gimple_seq_add_stmt (&new_body, t);
8948 gimple_seq_add_seq (&new_body, olist);
8949 if (ctx->cancellable)
8950 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8951 gimple_seq_add_seq (&new_body, dlist);
8953 new_body = maybe_catch_exception (new_body);
8955 t = gimple_build_omp_return
8956 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8957 OMP_CLAUSE_NOWAIT));
8958 gimple_seq_add_stmt (&new_body, t);
8959 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8961 gimple_bind_set_body (new_stmt, new_body);
8965 /* A subroutine of lower_omp_single. Expand the simple form of
8966 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8968 if (GOMP_single_start ())
8969 BODY;
8970 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8972 FIXME. It may be better to delay expanding the logic of this until
8973 pass_expand_omp. The expanded logic may make the job of a
8974 synchronization analysis pass more difficult. */
8976 static void
8977 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
8979 location_t loc = gimple_location (single_stmt);
8980 tree tlabel = create_artificial_label (loc);
8981 tree flabel = create_artificial_label (loc);
8982 gimple call, cond;
8983 tree lhs, decl;
8985 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8986 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8987 call = gimple_build_call (decl, 0);
8988 gimple_call_set_lhs (call, lhs);
8989 gimple_seq_add_stmt (pre_p, call);
8991 cond = gimple_build_cond (EQ_EXPR, lhs,
8992 fold_convert_loc (loc, TREE_TYPE (lhs),
8993 boolean_true_node),
8994 tlabel, flabel);
8995 gimple_seq_add_stmt (pre_p, cond);
8996 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
8997 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8998 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
9002 /* A subroutine of lower_omp_single. Expand the simple form of
9003 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
9005 #pragma omp single copyprivate (a, b, c)
9007 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
9010 	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
9011 	  {
9012 	    BODY;
9013 	    copyout.a = a;
9014 	    copyout.b = b;
9015 	    copyout.c = c;
9016 	    GOMP_single_copy_end (&copyout);
9017 	  }
9018 	else
9019 	  {
9020 	    a = copyout_p->a;
9021 	    b = copyout_p->b;
9022 	    c = copyout_p->c;
9023 	  }
9024 	GOMP_barrier ();
9027 FIXME. It may be better to delay expanding the logic of this until
9028 pass_expand_omp. The expanded logic may make the job of a
9029 synchronization analysis pass more difficult. */
9031 static void
9032 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
9034 tree ptr_type, t, l0, l1, l2, bfn_decl;
9035 gimple_seq copyin_seq;
9036 location_t loc = gimple_location (single_stmt);
9038 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
9040 ptr_type = build_pointer_type (ctx->record_type);
9041 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
9043 l0 = create_artificial_label (loc);
9044 l1 = create_artificial_label (loc);
9045 l2 = create_artificial_label (loc);
9047 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
9048 t = build_call_expr_loc (loc, bfn_decl, 0);
9049 t = fold_convert_loc (loc, ptr_type, t);
9050 gimplify_assign (ctx->receiver_decl, t, pre_p);
9052 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
9053 build_int_cst (ptr_type, 0));
9054 t = build3 (COND_EXPR, void_type_node, t,
9055 build_and_jump (&l0), build_and_jump (&l1));
9056 gimplify_and_add (t, pre_p);
9058 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
9060 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
9062 copyin_seq = NULL;
9063 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
9064 &copyin_seq, ctx);
9066 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9067 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
9068 t = build_call_expr_loc (loc, bfn_decl, 1, t);
9069 gimplify_and_add (t, pre_p);
9071 t = build_and_jump (&l2);
9072 gimplify_and_add (t, pre_p);
9074 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
9076 gimple_seq_add_seq (pre_p, copyin_seq);
9078 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
9082 /* Expand code for an OpenMP single directive. */
9084 static void
9085 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9087 tree block;
9088 gimple t, single_stmt = gsi_stmt (*gsi_p);
9089 gimple_bind bind;
9090 gimple_seq bind_body, bind_body_tail = NULL, dlist;
9092 push_gimplify_context ();
9094 block = make_node (BLOCK);
9095 bind = gimple_build_bind (NULL, NULL, block);
9096 gsi_replace (gsi_p, bind, true);
9097 bind_body = NULL;
9098 dlist = NULL;
9099 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
9100 &bind_body, &dlist, ctx, NULL);
9101 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
9103 gimple_seq_add_stmt (&bind_body, single_stmt);
9105 if (ctx->record_type)
9106 lower_omp_single_copy (single_stmt, &bind_body, ctx);
9107 else
9108 lower_omp_single_simple (single_stmt, &bind_body);
9110 gimple_omp_set_body (single_stmt, NULL);
9112 gimple_seq_add_seq (&bind_body, dlist);
9114 bind_body = maybe_catch_exception (bind_body);
9116 t = gimple_build_omp_return
9117 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
9118 OMP_CLAUSE_NOWAIT));
9119 gimple_seq_add_stmt (&bind_body_tail, t);
9120 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
9121 if (ctx->record_type)
9123 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
9124 tree clobber = build_constructor (ctx->record_type, NULL);
9125 TREE_THIS_VOLATILE (clobber) = 1;
9126 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
9127 clobber), GSI_SAME_STMT);
9129 gimple_seq_add_seq (&bind_body, bind_body_tail);
9130 gimple_bind_set_body (bind, bind_body);
9132 pop_gimplify_context (bind);
9134 gimple_bind_append_vars (bind, ctx->block_vars);
9135 BLOCK_VARS (block) = ctx->block_vars;
9136 if (BLOCK_VARS (block))
9137 TREE_USED (block) = 1;
9141 /* Expand code for an OpenMP master directive. */
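/* Illustrative sketch: the master body BODY is guarded as

       if (omp_get_thread_num () != 0) goto lab;
       BODY;
     lab:

   so only the team's thread 0 executes it; note there is no implied
   barrier.  */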
9143 static void
9144 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9146 tree block, lab = NULL, x, bfn_decl;
9147 gimple stmt = gsi_stmt (*gsi_p);
9148 gimple_bind bind;
9149 location_t loc = gimple_location (stmt);
9150 gimple_seq tseq;
9152 push_gimplify_context ();
9154 block = make_node (BLOCK);
9155 bind = gimple_build_bind (NULL, NULL, block);
9156 gsi_replace (gsi_p, bind, true);
9157 gimple_bind_add_stmt (bind, stmt);
9159 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9160 x = build_call_expr_loc (loc, bfn_decl, 0);
9161 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
9162 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
9163 tseq = NULL;
9164 gimplify_and_add (x, &tseq);
9165 gimple_bind_add_seq (bind, tseq);
9167 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9168 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9169 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9170 gimple_omp_set_body (stmt, NULL);
9172 gimple_bind_add_stmt (bind, gimple_build_label (lab));
9174 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9176 pop_gimplify_context (bind);
9178 gimple_bind_append_vars (bind, ctx->block_vars);
9179 BLOCK_VARS (block) = ctx->block_vars;
9183 /* Expand code for an OpenMP taskgroup directive. */
9185 static void
9186 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9188 gimple stmt = gsi_stmt (*gsi_p), x;
9189 gimple_bind bind;
9190 tree block = make_node (BLOCK);
9192 bind = gimple_build_bind (NULL, NULL, block);
9193 gsi_replace (gsi_p, bind, true);
9194 gimple_bind_add_stmt (bind, stmt);
9196 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
9197 0);
9198 gimple_bind_add_stmt (bind, x);
9200 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9201 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9202 gimple_omp_set_body (stmt, NULL);
9204 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9206 gimple_bind_append_vars (bind, ctx->block_vars);
9207 BLOCK_VARS (block) = ctx->block_vars;
9211 /* Expand code for an OpenMP ordered directive. */
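/* Illustratively, the lowering below simply brackets the body:

       GOMP_ordered_start ();
       BODY;
       GOMP_ordered_end ();

   the actual ordering guarantee comes from the runtime, keyed to the
   enclosing loop's iteration schedule.  */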
9213 static void
9214 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9216 tree block;
9217 gimple stmt = gsi_stmt (*gsi_p), x;
9218 gimple_bind bind;
9220 push_gimplify_context ();
9222 block = make_node (BLOCK);
9223 bind = gimple_build_bind (NULL, NULL, block);
9224 gsi_replace (gsi_p, bind, true);
9225 gimple_bind_add_stmt (bind, stmt);
9227 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
9228 0);
9229 gimple_bind_add_stmt (bind, x);
9231 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9232 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9233 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9234 gimple_omp_set_body (stmt, NULL);
9236 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
9237 gimple_bind_add_stmt (bind, x);
9239 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9241 pop_gimplify_context (bind);
9243 gimple_bind_append_vars (bind, ctx->block_vars);
9244 BLOCK_VARS (block) = gimple_bind_vars (bind);
9248 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
9249 substitution of a couple of function calls. But the NAMED case
9250 requires that languages coordinate on a symbol name. It is therefore
9251 best put here in common code. */
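/* For illustration (hypothetical name "lock"): "#pragma omp critical"
   lowers to

       GOMP_critical_start ();
       BODY;
       GOMP_critical_end ();

   while "#pragma omp critical (lock)" uses a shared variable named
   .gomp_critical_user_lock and instead calls
   GOMP_critical_name_start/_end with its address.  */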
9253 static GTY((param1_is (tree), param2_is (tree)))
9254 splay_tree critical_name_mutexes;
9256 static void
9257 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9259 tree block;
9260 tree name, lock, unlock;
9261 gimple_omp_critical stmt = as_a <gimple_omp_critical> (gsi_stmt (*gsi_p));
9262 gimple_bind bind;
9263 location_t loc = gimple_location (stmt);
9264 gimple_seq tbody;
9266 name = gimple_omp_critical_name (stmt);
9267 if (name)
9269 tree decl;
9270 splay_tree_node n;
9272 if (!critical_name_mutexes)
9273 critical_name_mutexes
9274 = splay_tree_new_ggc (splay_tree_compare_pointers,
9275 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9276 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9278 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
9279 if (n == NULL)
9281 char *new_str;
9283 decl = create_tmp_var_raw (ptr_type_node, NULL);
9285 new_str = ACONCAT ((".gomp_critical_user_",
9286 IDENTIFIER_POINTER (name), NULL));
9287 DECL_NAME (decl) = get_identifier (new_str);
9288 TREE_PUBLIC (decl) = 1;
9289 TREE_STATIC (decl) = 1;
9290 DECL_COMMON (decl) = 1;
9291 DECL_ARTIFICIAL (decl) = 1;
9292 DECL_IGNORED_P (decl) = 1;
9293 varpool_node::finalize_decl (decl);
9295 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
9296 (splay_tree_value) decl);
9298 else
9299 decl = (tree) n->value;
9301 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
9302 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
9304 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
9305 unlock = build_call_expr_loc (loc, unlock, 1,
9306 build_fold_addr_expr_loc (loc, decl));
9308 else
9310 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
9311 lock = build_call_expr_loc (loc, lock, 0);
9313 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
9314 unlock = build_call_expr_loc (loc, unlock, 0);
9317 push_gimplify_context ();
9319 block = make_node (BLOCK);
9320 bind = gimple_build_bind (NULL, NULL, block);
9321 gsi_replace (gsi_p, bind, true);
9322 gimple_bind_add_stmt (bind, stmt);
9324 tbody = gimple_bind_body (bind);
9325 gimplify_and_add (lock, &tbody);
9326 gimple_bind_set_body (bind, tbody);
9328 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9329 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9330 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9331 gimple_omp_set_body (stmt, NULL);
9333 tbody = gimple_bind_body (bind);
9334 gimplify_and_add (unlock, &tbody);
9335 gimple_bind_set_body (bind, tbody);
9337 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9339 pop_gimplify_context (bind);
9340 gimple_bind_append_vars (bind, ctx->block_vars);
9341 BLOCK_VARS (block) = gimple_bind_vars (bind);
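/* A sketch of the two lowered forms built above (not verbatim output).
   For an unnamed critical:

	GOMP_critical_start ();
	BODY;
	GOMP_critical_end ();

   and for `#pragma omp critical (foo)':

	GOMP_critical_name_start (&.gomp_critical_user_foo);
	BODY;
	GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the TREE_PUBLIC, DECL_COMMON
   pointer-sized handle created above, so that every translation unit
   using the same name shares one runtime mutex.  */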
9345 /* A subroutine of lower_omp_for. Generate code to emit the predicate
9346 for a lastprivate clause. Given a loop control predicate of (V
9347 cond N2), we gate the clause on (!(V cond N2)). The lowered form
9348 is appended to *DLIST, iterator initialization is appended to
9349 *BODY_P. */
9351 static void
9352 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
9353 gimple_seq *dlist, struct omp_context *ctx)
9355 tree clauses, cond, vinit;
9356 enum tree_code cond_code;
9357 gimple_seq stmts;
9359 cond_code = fd->loop.cond_code;
9360 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
9362 /* When possible, use a strict equality expression. This can let
9363 VRP-style optimizations deduce the value and remove a copy. */
9364 if (tree_fits_shwi_p (fd->loop.step))
9366 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
9367 if (step == 1 || step == -1)
9368 cond_code = EQ_EXPR;
9371 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
9373 clauses = gimple_omp_for_clauses (fd->for_stmt);
9374 stmts = NULL;
9375 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
9376 if (!gimple_seq_empty_p (stmts))
9378 gimple_seq_add_seq (&stmts, *dlist);
9379 *dlist = stmts;
9381 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
9382 vinit = fd->loop.n1;
9383 if (cond_code == EQ_EXPR
9384 && tree_fits_shwi_p (fd->loop.n2)
9385 && ! integer_zerop (fd->loop.n2))
9386 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
9387 else
9388 vinit = unshare_expr (vinit);
9390 /* Initialize the iterator variable, so that threads that don't execute
9391 any iterations don't execute the lastprivate clauses by accident. */
9392 gimplify_assign (fd->loop.v, vinit, body_p);
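/* For example (an illustrative sketch, not dump output), given

	#pragma omp for lastprivate (x)
	for (i = 0; i < N; i++) ...

   the step is +1, so COND_CODE becomes EQ_EXPR and we emit roughly

	i = 0;		-> iterator init appended to *BODY_P, so threads
			   that run no iterations cannot satisfy the
			   gate by accident
	...
	if (i == N)	-> gate added by lower_lastprivate_clauses
	  x = x.private;	-> copy-out, prepended to *DLIST

   (`x.private' here is just exposition for the thread-private copy).  */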
9397 /* Lower code for an OpenMP loop directive. */
9399 static void
9400 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9402 tree *rhs_p, block;
9403 struct omp_for_data fd, *fdp = NULL;
9404 gimple_omp_for stmt = as_a <gimple_omp_for> (gsi_stmt (*gsi_p));
9405 gimple_bind new_stmt;
9406 gimple_seq omp_for_body, body, dlist;
9407 size_t i;
9409 push_gimplify_context ();
9411 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
9413 block = make_node (BLOCK);
9414 new_stmt = gimple_build_bind (NULL, NULL, block);
9415 /* Replace at gsi right away, so that 'stmt' is no longer a member
9416 of a sequence; we're going to add it to a different
9417 one below. */
9418 gsi_replace (gsi_p, new_stmt, true);
9420 /* Move declaration of temporaries in the loop body before we make
9421 it go away. */
9422 omp_for_body = gimple_omp_body (stmt);
9423 if (!gimple_seq_empty_p (omp_for_body)
9424 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
9426 gimple_bind inner_bind =
9427 as_a <gimple_bind> (gimple_seq_first_stmt (omp_for_body));
9428 tree vars = gimple_bind_vars (inner_bind);
9429 gimple_bind_append_vars (new_stmt, vars);
9430 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
9431 keep them on the inner_bind and its block. */
9432 gimple_bind_set_vars (inner_bind, NULL_TREE);
9433 if (gimple_bind_block (inner_bind))
9434 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
9437 if (gimple_omp_for_combined_into_p (stmt))
9439 extract_omp_for_data (stmt, &fd, NULL);
9440 fdp = &fd;
9442 /* We need two temporaries with fd.loop.v type (istart/iend)
9443 and then (fd.collapse - 1) temporaries with the same
9444 type for count2 ... countN-1 vars if not constant. */
9445 size_t count = 2;
9446 tree type = fd.iter_type;
9447 if (fd.collapse > 1
9448 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
9449 count += fd.collapse - 1;
9450 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
9451 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
9452 tree clauses = *pc;
9453 if (parallel_for)
9454 outerc
9455 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
9456 OMP_CLAUSE__LOOPTEMP_);
9457 for (i = 0; i < count; i++)
9459 tree temp;
9460 if (parallel_for)
9462 gcc_assert (outerc);
9463 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
9464 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
9465 OMP_CLAUSE__LOOPTEMP_);
9467 else
9469 temp = create_tmp_var (type, NULL);
9470 insert_decl_map (&ctx->outer->cb, temp, temp);
9472 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
9473 OMP_CLAUSE_DECL (*pc) = temp;
9474 pc = &OMP_CLAUSE_CHAIN (*pc);
9476 *pc = clauses;
9479 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9480 dlist = NULL;
9481 body = NULL;
9482 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
9483 fdp);
9484 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
9486 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9488 /* Lower the header expressions. At this point, we can assume that
9489 the header is of the form:
9491 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
9493 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
9494 using the .omp_data_s mapping, if needed. */
9495 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
9497 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
9498 if (!is_gimple_min_invariant (*rhs_p))
9499 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9501 rhs_p = gimple_omp_for_final_ptr (stmt, i);
9502 if (!is_gimple_min_invariant (*rhs_p))
9503 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9505 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
9506 if (!is_gimple_min_invariant (*rhs_p))
9507 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9510 /* Once lowered, extract the bounds and clauses. */
9511 extract_omp_for_data (stmt, &fd, NULL);
9513 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
9515 gimple_seq_add_stmt (&body, stmt);
9516 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
9518 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
9519 fd.loop.v));
9521 /* After the loop, add exit clauses. */
9522 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
9524 if (ctx->cancellable)
9525 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
9527 gimple_seq_add_seq (&body, dlist);
9529 body = maybe_catch_exception (body);
9531 /* Region exit marker goes at the end of the loop body. */
9532 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
9533 maybe_add_implicit_barrier_cancel (ctx, &body);
9534 pop_gimplify_context (new_stmt);
9536 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9537 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
9538 if (BLOCK_VARS (block))
9539 TREE_USED (block) = 1;
9541 gimple_bind_set_body (new_stmt, body);
9542 gimple_omp_set_body (stmt, NULL);
9543 gimple_omp_for_set_pre_body (stmt, NULL);
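/* Putting it together, the bind built above has roughly this shape
   (a sketch, not dump output):

	{
	  ...rec input clauses, PRE_BODY, lowered bound temporaries...
	  ...lastprivate iterator init...
	  #pragma omp for ...
	  BODY;
	  OMP_CONTINUE (V, V)
	  ...reduction merges...
	  [cancel label, if the region is cancellable]
	  ...lastprivate copy-out (DLIST)...
	  OMP_RETURN	-> nowait iff the loop has no implicit barrier
	}  */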
9546 /* Callback for walk_stmts. Check if the current statement only contains
9547 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
9549 static tree
9550 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9551 bool *handled_ops_p,
9552 struct walk_stmt_info *wi)
9554 int *info = (int *) wi->info;
9555 gimple stmt = gsi_stmt (*gsi_p);
9557 *handled_ops_p = true;
9558 switch (gimple_code (stmt))
9560 WALK_SUBSTMTS;
9562 case GIMPLE_OMP_FOR:
9563 case GIMPLE_OMP_SECTIONS:
9564 *info = *info == 0 ? 1 : -1;
9565 break;
9566 default:
9567 *info = -1;
9568 break;
9570 return NULL;
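/* For instance, walking the body of

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   leaves *INFO == 1 (exactly one workshare and nothing else), which lets
   lower_omp_taskreg mark the parallel as combined; any additional
   statement flips *INFO to -1.  */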
9573 struct omp_taskcopy_context
9575 /* This field must be at the beginning, as we do "inheritance": Some
9576 callback functions for tree-inline.c (e.g., omp_copy_decl)
9577 receive a copy_body_data pointer that is up-casted to an
9578 omp_taskcopy_context pointer. */
9579 copy_body_data cb;
9580 omp_context *ctx;
9583 static tree
9584 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9586 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9588 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9589 return create_tmp_var (TREE_TYPE (var), NULL);
9591 return var;
9594 static tree
9595 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9597 tree name, new_fields = NULL, type, f;
9599 type = lang_hooks.types.make_type (RECORD_TYPE);
9600 name = DECL_NAME (TYPE_NAME (orig_type));
9601 name = build_decl (gimple_location (tcctx->ctx->stmt),
9602 TYPE_DECL, name, type);
9603 TYPE_NAME (type) = name;
9605 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9607 tree new_f = copy_node (f);
9608 DECL_CONTEXT (new_f) = type;
9609 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9610 TREE_CHAIN (new_f) = new_fields;
9611 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9612 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9613 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9614 &tcctx->cb, NULL);
9615 new_fields = new_f;
9616 tcctx->cb.decl_map->put (f, new_f);
9618 TYPE_FIELDS (type) = nreverse (new_fields);
9619 layout_type (type);
9620 return type;
9623 /* Create the task copyfn: the helper the runtime calls to copy shared variable pointers and copy-construct firstprivate variables into the task's copy of the data record. */
9625 static void
9626 create_task_copyfn (gimple task_stmt, omp_context *ctx)
9628 struct function *child_cfun;
9629 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9630 tree record_type, srecord_type, bind, list;
9631 bool record_needs_remap = false, srecord_needs_remap = false;
9632 splay_tree_node n;
9633 struct omp_taskcopy_context tcctx;
9634 location_t loc = gimple_location (task_stmt);
9636 child_fn = gimple_omp_task_copy_fn (task_stmt);
9637 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9638 gcc_assert (child_cfun->cfg == NULL);
9639 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9641 /* Reset DECL_CONTEXT on function arguments. */
9642 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9643 DECL_CONTEXT (t) = child_fn;
9645 /* Populate the function. */
9646 push_gimplify_context ();
9647 push_cfun (child_cfun);
9649 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9650 TREE_SIDE_EFFECTS (bind) = 1;
9651 list = NULL;
9652 DECL_SAVED_TREE (child_fn) = bind;
9653 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9655 /* Remap src and dst argument types if needed. */
9656 record_type = ctx->record_type;
9657 srecord_type = ctx->srecord_type;
9658 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9659 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9661 record_needs_remap = true;
9662 break;
9664 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9665 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9667 srecord_needs_remap = true;
9668 break;
9671 if (record_needs_remap || srecord_needs_remap)
9673 memset (&tcctx, '\0', sizeof (tcctx));
9674 tcctx.cb.src_fn = ctx->cb.src_fn;
9675 tcctx.cb.dst_fn = child_fn;
9676 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
9677 gcc_checking_assert (tcctx.cb.src_node);
9678 tcctx.cb.dst_node = tcctx.cb.src_node;
9679 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9680 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9681 tcctx.cb.eh_lp_nr = 0;
9682 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9683 tcctx.cb.decl_map = new hash_map<tree, tree>;
9684 tcctx.ctx = ctx;
9686 if (record_needs_remap)
9687 record_type = task_copyfn_remap_type (&tcctx, record_type);
9688 if (srecord_needs_remap)
9689 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9691 else
9692 tcctx.cb.decl_map = NULL;
9694 arg = DECL_ARGUMENTS (child_fn);
9695 TREE_TYPE (arg) = build_pointer_type (record_type);
9696 sarg = DECL_CHAIN (arg);
9697 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9699 /* First pass: initialize temporaries used in record_type and srecord_type
9700 sizes and field offsets. */
9701 if (tcctx.cb.decl_map)
9702 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9703 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9705 tree *p;
9707 decl = OMP_CLAUSE_DECL (c);
9708 p = tcctx.cb.decl_map->get (decl);
9709 if (p == NULL)
9710 continue;
9711 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9712 sf = (tree) n->value;
9713 sf = *tcctx.cb.decl_map->get (sf);
9714 src = build_simple_mem_ref_loc (loc, sarg);
9715 src = omp_build_component_ref (src, sf);
9716 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9717 append_to_statement_list (t, &list);
9720 /* Second pass: copy shared var pointers and copy construct non-VLA
9721 firstprivate vars. */
9722 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9723 switch (OMP_CLAUSE_CODE (c))
9725 case OMP_CLAUSE_SHARED:
9726 decl = OMP_CLAUSE_DECL (c);
9727 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9728 if (n == NULL)
9729 break;
9730 f = (tree) n->value;
9731 if (tcctx.cb.decl_map)
9732 f = *tcctx.cb.decl_map->get (f);
9733 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9734 sf = (tree) n->value;
9735 if (tcctx.cb.decl_map)
9736 sf = *tcctx.cb.decl_map->get (sf);
9737 src = build_simple_mem_ref_loc (loc, sarg);
9738 src = omp_build_component_ref (src, sf);
9739 dst = build_simple_mem_ref_loc (loc, arg);
9740 dst = omp_build_component_ref (dst, f);
9741 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9742 append_to_statement_list (t, &list);
9743 break;
9744 case OMP_CLAUSE_FIRSTPRIVATE:
9745 decl = OMP_CLAUSE_DECL (c);
9746 if (is_variable_sized (decl))
9747 break;
9748 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9749 if (n == NULL)
9750 break;
9751 f = (tree) n->value;
9752 if (tcctx.cb.decl_map)
9753 f = *tcctx.cb.decl_map->get (f);
9754 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9755 if (n != NULL)
9757 sf = (tree) n->value;
9758 if (tcctx.cb.decl_map)
9759 sf = *tcctx.cb.decl_map->get (sf);
9760 src = build_simple_mem_ref_loc (loc, sarg);
9761 src = omp_build_component_ref (src, sf);
9762 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9763 src = build_simple_mem_ref_loc (loc, src);
9765 else
9766 src = decl;
9767 dst = build_simple_mem_ref_loc (loc, arg);
9768 dst = omp_build_component_ref (dst, f);
9769 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9770 append_to_statement_list (t, &list);
9771 break;
9772 case OMP_CLAUSE_PRIVATE:
9773 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9774 break;
9775 decl = OMP_CLAUSE_DECL (c);
9776 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9777 f = (tree) n->value;
9778 if (tcctx.cb.decl_map)
9779 f = *tcctx.cb.decl_map->get (f);
9780 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9781 if (n != NULL)
9783 sf = (tree) n->value;
9784 if (tcctx.cb.decl_map)
9785 sf = *tcctx.cb.decl_map->get (sf);
9786 src = build_simple_mem_ref_loc (loc, sarg);
9787 src = omp_build_component_ref (src, sf);
9788 if (use_pointer_for_field (decl, NULL))
9789 src = build_simple_mem_ref_loc (loc, src);
9791 else
9792 src = decl;
9793 dst = build_simple_mem_ref_loc (loc, arg);
9794 dst = omp_build_component_ref (dst, f);
9795 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9796 append_to_statement_list (t, &list);
9797 break;
9798 default:
9799 break;
9802 /* Last pass: handle VLA firstprivates. */
9803 if (tcctx.cb.decl_map)
9804 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9805 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9807 tree ind, ptr, df;
9809 decl = OMP_CLAUSE_DECL (c);
9810 if (!is_variable_sized (decl))
9811 continue;
9812 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9813 if (n == NULL)
9814 continue;
9815 f = (tree) n->value;
9816 f = *tcctx.cb.decl_map->get (f);
9817 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9818 ind = DECL_VALUE_EXPR (decl);
9819 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9820 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9821 n = splay_tree_lookup (ctx->sfield_map,
9822 (splay_tree_key) TREE_OPERAND (ind, 0));
9823 sf = (tree) n->value;
9824 sf = *tcctx.cb.decl_map->get (sf);
9825 src = build_simple_mem_ref_loc (loc, sarg);
9826 src = omp_build_component_ref (src, sf);
9827 src = build_simple_mem_ref_loc (loc, src);
9828 dst = build_simple_mem_ref_loc (loc, arg);
9829 dst = omp_build_component_ref (dst, f);
9830 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9831 append_to_statement_list (t, &list);
9832 n = splay_tree_lookup (ctx->field_map,
9833 (splay_tree_key) TREE_OPERAND (ind, 0));
9834 df = (tree) n->value;
9835 df = *tcctx.cb.decl_map->get (df);
9836 ptr = build_simple_mem_ref_loc (loc, arg);
9837 ptr = omp_build_component_ref (ptr, df);
9838 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9839 build_fold_addr_expr_loc (loc, dst));
9840 append_to_statement_list (t, &list);
9843 t = build1 (RETURN_EXPR, void_type_node, NULL);
9844 append_to_statement_list (t, &list);
9846 if (tcctx.cb.decl_map)
9847 delete tcctx.cb.decl_map;
9848 pop_gimplify_context (NULL);
9849 BIND_EXPR_BODY (bind) = list;
9850 pop_cfun ();
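/* Lower the depend clauses of a task into the address array handed to
   the runtime. As a sketch of the layout built below (the array itself
   is an unnamed temporary; `.deps' is just a label for exposition), for

	#pragma omp task depend(out: a) depend(in: b, c)

   we build

	void *.deps[5];
	.deps[0] = (void *) 3;	-> total number of depend addresses
	.deps[1] = (void *) 1;	-> number of out/inout addresses
	.deps[2] = &a;		-> out/inout entries come first
	.deps[3] = &b;		-> then the in entries
	.deps[4] = &c;

   prepend a new OMP_CLAUSE_DEPEND pointing at the array to the task's
   clauses (ISEQ), and clobber the array after the task (OSEQ).  */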
9853 static void
9854 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9856 tree c, clauses;
9857 gimple g;
9858 size_t n_in = 0, n_out = 0, idx = 2, i;
9860 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9861 OMP_CLAUSE_DEPEND);
9862 gcc_assert (clauses);
9863 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9864 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9865 switch (OMP_CLAUSE_DEPEND_KIND (c))
9867 case OMP_CLAUSE_DEPEND_IN:
9868 n_in++;
9869 break;
9870 case OMP_CLAUSE_DEPEND_OUT:
9871 case OMP_CLAUSE_DEPEND_INOUT:
9872 n_out++;
9873 break;
9874 default:
9875 gcc_unreachable ();
9877 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9878 tree array = create_tmp_var (type, NULL);
9879 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9880 NULL_TREE);
9881 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9882 gimple_seq_add_stmt (iseq, g);
9883 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9884 NULL_TREE);
9885 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9886 gimple_seq_add_stmt (iseq, g);
9887 for (i = 0; i < 2; i++)
9889 if ((i ? n_in : n_out) == 0)
9890 continue;
9891 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9892 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9893 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9895 tree t = OMP_CLAUSE_DECL (c);
9896 t = fold_convert (ptr_type_node, t);
9897 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9898 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9899 NULL_TREE, NULL_TREE);
9900 g = gimple_build_assign (r, t);
9901 gimple_seq_add_stmt (iseq, g);
9904 tree *p = gimple_omp_task_clauses_ptr (stmt);
9905 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9906 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9907 OMP_CLAUSE_CHAIN (c) = *p;
9908 *p = c;
9909 tree clobber = build_constructor (type, NULL);
9910 TREE_THIS_VOLATILE (clobber) = 1;
9911 g = gimple_build_assign (array, clobber);
9912 gimple_seq_add_stmt (oseq, g);
9915 /* Lower the OpenMP parallel or task directive in the current statement
9916 in GSI_P. CTX holds context information for the directive. */
9918 static void
9919 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9921 tree clauses;
9922 tree child_fn, t;
9923 gimple stmt = gsi_stmt (*gsi_p);
9924 gimple_bind par_bind, bind, dep_bind = NULL;
9925 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9926 location_t loc = gimple_location (stmt);
9928 clauses = gimple_omp_taskreg_clauses (stmt);
9929 par_bind =
9930 as_a <gimple_bind> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
9931 par_body = gimple_bind_body (par_bind);
9932 child_fn = ctx->cb.dst_fn;
9933 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9934 && !gimple_omp_parallel_combined_p (stmt))
9936 struct walk_stmt_info wi;
9937 int ws_num = 0;
9939 memset (&wi, 0, sizeof (wi));
9940 wi.info = &ws_num;
9941 wi.val_only = true;
9942 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9943 if (ws_num == 1)
9944 gimple_omp_parallel_set_combined_p (stmt, true);
9946 gimple_seq dep_ilist = NULL;
9947 gimple_seq dep_olist = NULL;
9948 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9949 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9951 push_gimplify_context ();
9952 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9953 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9956 if (ctx->srecord_type)
9957 create_task_copyfn (stmt, ctx);
9959 push_gimplify_context ();
9961 par_olist = NULL;
9962 par_ilist = NULL;
9963 par_rlist = NULL;
9964 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9965 lower_omp (&par_body, ctx);
9966 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9967 lower_reduction_clauses (clauses, &par_rlist, ctx);
9969 /* Declare all the variables created by mapping and the variables
9970 declared in the scope of the parallel body. */
9971 record_vars_into (ctx->block_vars, child_fn);
9972 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9974 if (ctx->record_type)
9976 ctx->sender_decl
9977 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9978 : ctx->record_type, ".omp_data_o");
9979 DECL_NAMELESS (ctx->sender_decl) = 1;
9980 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9981 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9984 olist = NULL;
9985 ilist = NULL;
9986 lower_send_clauses (clauses, &ilist, &olist, ctx);
9987 lower_send_shared_vars (&ilist, &olist, ctx);
9989 if (ctx->record_type)
9991 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
9992 TREE_THIS_VOLATILE (clobber) = 1;
9993 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9994 clobber));
9997 /* Once all the expansions are done, sequence all the different
9998 fragments inside gimple_omp_body. */
10000 new_body = NULL;
10002 if (ctx->record_type)
10004 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10005 /* fixup_child_record_type might have changed receiver_decl's type. */
10006 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
10007 gimple_seq_add_stmt (&new_body,
10008 gimple_build_assign (ctx->receiver_decl, t));
10011 gimple_seq_add_seq (&new_body, par_ilist);
10012 gimple_seq_add_seq (&new_body, par_body);
10013 gimple_seq_add_seq (&new_body, par_rlist);
10014 if (ctx->cancellable)
10015 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
10016 gimple_seq_add_seq (&new_body, par_olist);
10017 new_body = maybe_catch_exception (new_body);
10018 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10019 gimple_omp_set_body (stmt, new_body);
10021 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
10022 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
10023 gimple_bind_add_seq (bind, ilist);
10024 gimple_bind_add_stmt (bind, stmt);
10025 gimple_bind_add_seq (bind, olist);
10027 pop_gimplify_context (NULL);
10029 if (dep_bind)
10031 gimple_bind_add_seq (dep_bind, dep_ilist);
10032 gimple_bind_add_stmt (dep_bind, bind);
10033 gimple_bind_add_seq (dep_bind, dep_olist);
10034 pop_gimplify_context (dep_bind);
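/* A sketch of the taskreg result for a parallel (.omp_data_o is the
   sender temporary created above; .omp_data_i is the usual name of the
   receiver decl, which is created during scanning, not here):

	{
	  .omp_data_o.x = x;			-> ILIST: send clauses
	  #pragma omp parallel [child fn, data: &.omp_data_o]
	    {
	      .omp_data_i = &.omp_data_o;	-> receiver fixup
	      ...copy-in (PAR_ILIST)...
	      BODY;
	      ...reductions (PAR_RLIST)...
	      [cancel label, if cancellable]
	      ...copy-back (PAR_OLIST)...
	      OMP_RETURN
	    }
	  .omp_data_o = {CLOBBER};		-> OLIST
	}  */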
10038 /* Lower the OpenMP target directive in the current statement
10039 in GSI_P. CTX holds context information for the directive. */
10041 static void
10042 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10044 tree clauses;
10045 tree child_fn, t, c;
10046 gimple stmt = gsi_stmt (*gsi_p);
10047 gimple_bind tgt_bind = NULL, bind;
10048 gimple_seq tgt_body = NULL, olist, ilist, new_body;
10049 location_t loc = gimple_location (stmt);
10050 int kind = gimple_omp_target_kind (stmt);
10051 unsigned int map_cnt = 0;
10053 clauses = gimple_omp_target_clauses (stmt);
10054 if (kind == GF_OMP_TARGET_KIND_REGION)
10056 tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
10057 tgt_body = gimple_bind_body (tgt_bind);
10059 else if (kind == GF_OMP_TARGET_KIND_DATA)
10060 tgt_body = gimple_omp_body (stmt);
10061 child_fn = ctx->cb.dst_fn;
10063 push_gimplify_context ();
10065 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10066 switch (OMP_CLAUSE_CODE (c))
10068 tree var, x;
10070 default:
10071 break;
10072 case OMP_CLAUSE_MAP:
10073 case OMP_CLAUSE_TO:
10074 case OMP_CLAUSE_FROM:
10075 var = OMP_CLAUSE_DECL (c);
10076 if (!DECL_P (var))
10078 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
10079 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10080 map_cnt++;
10081 continue;
10084 if (DECL_SIZE (var)
10085 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
10087 tree var2 = DECL_VALUE_EXPR (var);
10088 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
10089 var2 = TREE_OPERAND (var2, 0);
10090 gcc_assert (DECL_P (var2));
10091 var = var2;
10094 if (!maybe_lookup_field (var, ctx))
10095 continue;
10097 if (kind == GF_OMP_TARGET_KIND_REGION)
10099 x = build_receiver_ref (var, true, ctx);
10100 tree new_var = lookup_decl (var, ctx);
10101 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10102 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10103 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10104 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
10105 x = build_simple_mem_ref (x);
10106 SET_DECL_VALUE_EXPR (new_var, x);
10107 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
10109 map_cnt++;
10112 if (kind == GF_OMP_TARGET_KIND_REGION)
10114 target_nesting_level++;
10115 lower_omp (&tgt_body, ctx);
10116 target_nesting_level--;
10118 else if (kind == GF_OMP_TARGET_KIND_DATA)
10119 lower_omp (&tgt_body, ctx);
10121 if (kind == GF_OMP_TARGET_KIND_REGION)
10123 /* Declare all the variables created by mapping and the variables
10124 declared in the scope of the target body. */
10125 record_vars_into (ctx->block_vars, child_fn);
10126 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
10129 olist = NULL;
10130 ilist = NULL;
10131 if (ctx->record_type)
10133 ctx->sender_decl
10134 = create_tmp_var (ctx->record_type, ".omp_data_arr");
10135 DECL_NAMELESS (ctx->sender_decl) = 1;
10136 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
10137 t = make_tree_vec (3);
10138 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
10139 TREE_VEC_ELT (t, 1)
10140 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
10141 ".omp_data_sizes");
10142 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
10143 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
10144 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
10145 TREE_VEC_ELT (t, 2)
10146 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
10147 map_cnt),
10148 ".omp_data_kinds");
10149 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
10150 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
10151 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
10152 gimple_omp_target_set_data_arg (stmt, t);
10154 vec<constructor_elt, va_gc> *vsize;
10155 vec<constructor_elt, va_gc> *vkind;
10156 vec_alloc (vsize, map_cnt);
10157 vec_alloc (vkind, map_cnt);
10158 unsigned int map_idx = 0;
10160 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10161 switch (OMP_CLAUSE_CODE (c))
10163 tree ovar, nc;
10165 default:
10166 break;
10167 case OMP_CLAUSE_MAP:
10168 case OMP_CLAUSE_TO:
10169 case OMP_CLAUSE_FROM:
10170 nc = c;
10171 ovar = OMP_CLAUSE_DECL (c);
10172 if (!DECL_P (ovar))
10174 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10175 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10177 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
10178 == get_base_address (ovar));
10179 nc = OMP_CLAUSE_CHAIN (c);
10180 ovar = OMP_CLAUSE_DECL (nc);
10182 else
10184 tree x = build_sender_ref (ovar, ctx);
10185 tree v
10186 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
10187 gimplify_assign (x, v, &ilist);
10188 nc = NULL_TREE;
10191 else
10193 if (DECL_SIZE (ovar)
10194 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
10196 tree ovar2 = DECL_VALUE_EXPR (ovar);
10197 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
10198 ovar2 = TREE_OPERAND (ovar2, 0);
10199 gcc_assert (DECL_P (ovar2));
10200 ovar = ovar2;
10202 if (!maybe_lookup_field (ovar, ctx))
10203 continue;
10206 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
10207 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
10208 talign = DECL_ALIGN_UNIT (ovar);
10209 if (nc)
10211 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
10212 tree x = build_sender_ref (ovar, ctx);
10213 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10214 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10215 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10216 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
10218 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10219 tree avar
10220 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
10221 mark_addressable (avar);
10222 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
10223 talign = DECL_ALIGN_UNIT (avar);
10224 avar = build_fold_addr_expr (avar);
10225 gimplify_assign (x, avar, &ilist);
10227 else if (is_gimple_reg (var))
10229 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10230 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
10231 mark_addressable (avar);
10232 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
10233 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
10234 gimplify_assign (avar, var, &ilist);
10235 avar = build_fold_addr_expr (avar);
10236 gimplify_assign (x, avar, &ilist);
10237 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
10238 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
10239 && !TYPE_READONLY (TREE_TYPE (var)))
10241 x = build_sender_ref (ovar, ctx);
10242 x = build_simple_mem_ref (x);
10243 gimplify_assign (var, x, &olist);
10246 else
10248 var = build_fold_addr_expr (var);
10249 gimplify_assign (x, var, &ilist);
10252 tree s = OMP_CLAUSE_SIZE (c);
10253 if (s == NULL_TREE)
10254 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
10255 s = fold_convert (size_type_node, s);
10256 tree purpose = size_int (map_idx++);
10257 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
10258 if (TREE_CODE (s) != INTEGER_CST)
10259 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
10261 unsigned char tkind = 0;
10262 switch (OMP_CLAUSE_CODE (c))
10264 case OMP_CLAUSE_MAP:
10265 tkind = OMP_CLAUSE_MAP_KIND (c);
10266 break;
10267 case OMP_CLAUSE_TO:
10268 tkind = OMP_CLAUSE_MAP_TO;
10269 break;
10270 case OMP_CLAUSE_FROM:
10271 tkind = OMP_CLAUSE_MAP_FROM;
10272 break;
10273 default:
10274 gcc_unreachable ();
10276 talign = ceil_log2 (talign);
10277 tkind |= talign << 3;
10278 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
10279 build_int_cst (unsigned_char_type_node,
10280 tkind));
10281 if (nc && nc != c)
10282 c = nc;
10285 gcc_assert (map_idx == map_cnt);
10287 DECL_INITIAL (TREE_VEC_ELT (t, 1))
10288 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
10289 DECL_INITIAL (TREE_VEC_ELT (t, 2))
10290 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
10291 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
10293 gimple_seq initlist = NULL;
10294 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
10295 TREE_VEC_ELT (t, 1)),
10296 &initlist, true, NULL_TREE);
10297 gimple_seq_add_seq (&ilist, initlist);
10299 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
10300 NULL);
10301 TREE_THIS_VOLATILE (clobber) = 1;
10302 gimple_seq_add_stmt (&olist,
10303 gimple_build_assign (TREE_VEC_ELT (t, 1),
10304 clobber));
10307 tree clobber = build_constructor (ctx->record_type, NULL);
10308 TREE_THIS_VOLATILE (clobber) = 1;
10309 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
10310 clobber));
10313 /* Once all the expansions are done, sequence all the different
10314 fragments inside gimple_omp_body. */
10316 new_body = NULL;
10318 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
10320 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10321 /* fixup_child_record_type might have changed receiver_decl's type. */
10322 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
10323 gimple_seq_add_stmt (&new_body,
10324 gimple_build_assign (ctx->receiver_decl, t));
10327 if (kind == GF_OMP_TARGET_KIND_REGION)
10329 gimple_seq_add_seq (&new_body, tgt_body);
10330 new_body = maybe_catch_exception (new_body);
10332 else if (kind == GF_OMP_TARGET_KIND_DATA)
10333 new_body = tgt_body;
10334 if (kind != GF_OMP_TARGET_KIND_UPDATE)
10336 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10337 gimple_omp_set_body (stmt, new_body);
10340 bind = gimple_build_bind (NULL, NULL,
10341 tgt_bind ? gimple_bind_block (tgt_bind)
10342 : NULL_TREE);
10343 gsi_replace (gsi_p, bind, true);
10344 gimple_bind_add_seq (bind, ilist);
10345 gimple_bind_add_stmt (bind, stmt);
10346 gimple_bind_add_seq (bind, olist);
10348 pop_gimplify_context (NULL);
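/* Sketch of the sender side built above for a target construct with
   MAP_CNT mappings:

	struct .omp_data_t .omp_data_arr;	-> one field per mapping
	size_t .omp_data_sizes[MAP_CNT];	-> byte size of each mapping
	unsigned char .omp_data_kinds[MAP_CNT];	-> map kind in the low 3
						   bits, ceil_log2 of the
						   alignment in the rest

   The three are bundled into a TREE_VEC and stashed as the target's
   data_arg, for pass_expand_omp to turn into the runtime GOMP_target*
   call. (`.omp_data_t' is a made-up tag for exposition; the record
   type is anonymous.)  */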
10351 /* Lower code for an OpenMP teams directive. */
10353 static void
10354 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10356 gimple teams_stmt = gsi_stmt (*gsi_p);
10357 push_gimplify_context ();
10359 tree block = make_node (BLOCK);
10360 gimple_bind bind = gimple_build_bind (NULL, NULL, block);
10361 gsi_replace (gsi_p, bind, true);
10362 gimple_seq bind_body = NULL;
10363 gimple_seq dlist = NULL;
10364 gimple_seq olist = NULL;
10366 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10367 OMP_CLAUSE_NUM_TEAMS);
10368 if (num_teams == NULL_TREE)
10369 num_teams = build_int_cst (unsigned_type_node, 0);
10370 else
10372 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
10373 num_teams = fold_convert (unsigned_type_node, num_teams);
10374 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
10376 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10377 OMP_CLAUSE_THREAD_LIMIT);
10378 if (thread_limit == NULL_TREE)
10379 thread_limit = build_int_cst (unsigned_type_node, 0);
10380 else
10382 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
10383 thread_limit = fold_convert (unsigned_type_node, thread_limit);
10384 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
10385 fb_rvalue);
10388 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
10389 &bind_body, &dlist, ctx, NULL);
10390 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
10391 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
10392 gimple_seq_add_stmt (&bind_body, teams_stmt);
10394 location_t loc = gimple_location (teams_stmt);
10395 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
10396 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
10397 gimple_set_location (call, loc);
10398 gimple_seq_add_stmt (&bind_body, call);
10400 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
10401 gimple_omp_set_body (teams_stmt, NULL);
10402 gimple_seq_add_seq (&bind_body, olist);
10403 gimple_seq_add_seq (&bind_body, dlist);
10404 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
10405 gimple_bind_set_body (bind, bind_body);
10407 pop_gimplify_context (bind);
10409 gimple_bind_append_vars (bind, ctx->block_vars);
10410 BLOCK_VARS (block) = ctx->block_vars;
10411 if (BLOCK_VARS (block))
10412 TREE_USED (block) = 1;
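/* Sketch of the result (not verbatim output):

	{
	  ...clause expressions, rec input clauses...
	  #pragma omp teams
	  GOMP_teams (num_teams, thread_limit);
	  BODY;
	  ...reductions (OLIST), DLIST...
	  OMP_RETURN	-> nowait region-exit marker
	}

   A missing num_teams or thread_limit clause is passed as 0, which we
   assume the runtime reads as "pick a default".  */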
10416 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
10417 regimplified. If DATA is non-NULL, lower_omp_1 is being called outside
10418 of an OpenMP context, but with task_shared_vars set. */
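/* For instance, if uses of a shared variable `x' were redirected via
   DECL_VALUE_EXPR to `.omp_data_i->x', a previously valid

	if (x != 0) ...

   is no longer valid GIMPLE: the operand is now a component reference
   that must first be loaded into a temporary. Statements like that are
   what this walk flags for regimplification.  */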
10420 static tree
10421 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
10422 void *data)
10424 tree t = *tp;
10426 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
10427 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
10428 return t;
10430 if (task_shared_vars
10431 && DECL_P (t)
10432 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
10433 return t;
10435 /* If a global variable has been privatized, TREE_CONSTANT on
10436 ADDR_EXPR might be wrong. */
10437 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
10438 recompute_tree_invariant_for_addr_expr (t);
10440 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
10441 return NULL_TREE;
10444 static void
10445 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10447 gimple stmt = gsi_stmt (*gsi_p);
10448 struct walk_stmt_info wi;
10450 if (gimple_has_location (stmt))
10451 input_location = gimple_location (stmt);
10453 if (task_shared_vars)
10454 memset (&wi, '\0', sizeof (wi));
10456 /* If we have issued syntax errors, avoid doing any heavy lifting.
10457 Just replace the OpenMP directives with a NOP to avoid
10458 confusing RTL expansion. */
10459 if (seen_error () && is_gimple_omp (stmt))
10461 gsi_replace (gsi_p, gimple_build_nop (), true);
10462 return;
10465 switch (gimple_code (stmt))
10467 case GIMPLE_COND:
10468 if ((ctx || task_shared_vars)
10469 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
10470 ctx ? NULL : &wi, NULL)
10471 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
10472 ctx ? NULL : &wi, NULL)))
10473 gimple_regimplify_operands (stmt, gsi_p);
10474 break;
10475 case GIMPLE_CATCH:
10476 lower_omp (gimple_catch_handler_ptr (as_a <gimple_catch> (stmt)), ctx);
10477 break;
10478 case GIMPLE_EH_FILTER:
10479 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
10480 break;
10481 case GIMPLE_TRY:
10482 lower_omp (gimple_try_eval_ptr (stmt), ctx);
10483 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
10484 break;
10485 case GIMPLE_TRANSACTION:
10486 lower_omp (gimple_transaction_body_ptr (
10487 as_a <gimple_transaction> (stmt)),
10488 ctx);
10489 break;
10490 case GIMPLE_BIND:
10491 lower_omp (gimple_bind_body_ptr (as_a <gimple_bind> (stmt)), ctx);
10492 break;
10493 case GIMPLE_OMP_PARALLEL:
10494 case GIMPLE_OMP_TASK:
10495 ctx = maybe_lookup_ctx (stmt);
10496 gcc_assert (ctx);
10497 if (ctx->cancellable)
10498 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10499 lower_omp_taskreg (gsi_p, ctx);
10500 break;
10501 case GIMPLE_OMP_FOR:
10502 ctx = maybe_lookup_ctx (stmt);
10503 gcc_assert (ctx);
10504 if (ctx->cancellable)
10505 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10506 lower_omp_for (gsi_p, ctx);
10507 break;
10508 case GIMPLE_OMP_SECTIONS:
10509 ctx = maybe_lookup_ctx (stmt);
10510 gcc_assert (ctx);
10511 if (ctx->cancellable)
10512 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10513 lower_omp_sections (gsi_p, ctx);
10514 break;
10515 case GIMPLE_OMP_SINGLE:
10516 ctx = maybe_lookup_ctx (stmt);
10517 gcc_assert (ctx);
10518 lower_omp_single (gsi_p, ctx);
10519 break;
10520 case GIMPLE_OMP_MASTER:
10521 ctx = maybe_lookup_ctx (stmt);
10522 gcc_assert (ctx);
10523 lower_omp_master (gsi_p, ctx);
10524 break;
10525 case GIMPLE_OMP_TASKGROUP:
10526 ctx = maybe_lookup_ctx (stmt);
10527 gcc_assert (ctx);
10528 lower_omp_taskgroup (gsi_p, ctx);
10529 break;
10530 case GIMPLE_OMP_ORDERED:
10531 ctx = maybe_lookup_ctx (stmt);
10532 gcc_assert (ctx);
10533 lower_omp_ordered (gsi_p, ctx);
10534 break;
10535 case GIMPLE_OMP_CRITICAL:
10536 ctx = maybe_lookup_ctx (stmt);
10537 gcc_assert (ctx);
10538 lower_omp_critical (gsi_p, ctx);
10539 break;
10540 case GIMPLE_OMP_ATOMIC_LOAD:
10541 if ((ctx || task_shared_vars)
10542 && walk_tree (gimple_omp_atomic_load_rhs_ptr (
10543 as_a <gimple_omp_atomic_load> (stmt)),
10544 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
10545 gimple_regimplify_operands (stmt, gsi_p);
10546 break;
10547 case GIMPLE_OMP_TARGET:
10548 ctx = maybe_lookup_ctx (stmt);
10549 gcc_assert (ctx);
10550 lower_omp_target (gsi_p, ctx);
10551 break;
10552 case GIMPLE_OMP_TEAMS:
10553 ctx = maybe_lookup_ctx (stmt);
10554 gcc_assert (ctx);
10555 lower_omp_teams (gsi_p, ctx);
10556 break;
10557 case GIMPLE_CALL:
10558 tree fndecl;
10559 fndecl = gimple_call_fndecl (stmt);
10560 if (fndecl
10561 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10562 switch (DECL_FUNCTION_CODE (fndecl))
10564 case BUILT_IN_GOMP_BARRIER:
10565 if (ctx == NULL)
10566 break;
10567 /* FALLTHRU */
10568 case BUILT_IN_GOMP_CANCEL:
10569 case BUILT_IN_GOMP_CANCELLATION_POINT:
10570 omp_context *cctx;
10571 cctx = ctx;
10572 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10573 cctx = cctx->outer;
10574 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
10575 if (!cctx->cancellable)
10577 if (DECL_FUNCTION_CODE (fndecl)
10578 == BUILT_IN_GOMP_CANCELLATION_POINT)
10580 stmt = gimple_build_nop ();
10581 gsi_replace (gsi_p, stmt, false);
10583 break;
10585 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10587 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10588 gimple_call_set_fndecl (stmt, fndecl);
10589 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10591 tree lhs;
10592 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)), NULL);
10593 gimple_call_set_lhs (stmt, lhs);
10594 tree fallthru_label;
10595 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10596 gimple g;
10597 g = gimple_build_label (fallthru_label);
10598 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10599 g = gimple_build_cond (NE_EXPR, lhs,
10600 fold_convert (TREE_TYPE (lhs),
10601 boolean_false_node),
10602 cctx->cancel_label, fallthru_label);
10603 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
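	  /* I.e. (sketch): a GOMP_barrier () inside a cancellable region
	     has just been rewritten as

		.tmp = GOMP_barrier_cancel ();
		if (.tmp != 0) goto <cancel_label>; else goto <fallthru>;

	     where `.tmp' stands for the unnamed lhs temporary created
	     above.  */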
10604 break;
10605 default:
10606 break;
10608 /* FALLTHRU */
10609 default:
10610 if ((ctx || task_shared_vars)
10611 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10612 ctx ? NULL : &wi))
10614 /* Just remove clobbers; this should happen only if we have
10615 "privatized" local addressable variables in SIMD regions.
10616 The clobber isn't needed in that case, and gimplifying the address
10617 of the ARRAY_REF into a pointer and creating a MEM_REF based
10618 clobber would create worse code than we get with the clobber
10619 dropped. */
10620 if (gimple_clobber_p (stmt))
10622 gsi_replace (gsi_p, gimple_build_nop (), true);
10623 break;
10625 gimple_regimplify_operands (stmt, gsi_p);
10627 break;
10631 static void
10632 lower_omp (gimple_seq *body, omp_context *ctx)
10634 location_t saved_location = input_location;
10635 gimple_stmt_iterator gsi;
10636 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10637 lower_omp_1 (&gsi, ctx);
10638 /* During gimplification, we have not always invoked fold_stmt
10639 (gimplify.c:maybe_fold_stmt); call it now. */
10640 if (target_nesting_level)
10641 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10642 fold_stmt (&gsi);
10643 input_location = saved_location;
10646 /* Main entry point. */
10648 static unsigned int
10649 execute_lower_omp (void)
10651 gimple_seq body;
10652 int i;
10653 omp_context *ctx;
10655 /* This pass always runs, to provide PROP_gimple_lomp.
10656 But there is nothing to do unless -fopenmp is given. */
10657 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10658 return 0;
10660 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10661 delete_omp_context);
10663 body = gimple_body (current_function_decl);
10664 scan_omp (&body, NULL);
10665 gcc_assert (taskreg_nesting_level == 0);
10666 FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
10667 finish_taskreg_scan (ctx);
10668 taskreg_contexts.release ();
10670 if (all_contexts->root)
10672 if (task_shared_vars)
10673 push_gimplify_context ();
10674 lower_omp (&body, NULL);
10675 if (task_shared_vars)
10676 pop_gimplify_context (NULL);
10679 if (all_contexts)
10681 splay_tree_delete (all_contexts);
10682 all_contexts = NULL;
10684 BITMAP_FREE (task_shared_vars);
10685 return 0;
10688 namespace {
10690 const pass_data pass_data_lower_omp =
10692 GIMPLE_PASS, /* type */
10693 "omplower", /* name */
10694 OPTGROUP_NONE, /* optinfo_flags */
10695 TV_NONE, /* tv_id */
10696 PROP_gimple_any, /* properties_required */
10697 PROP_gimple_lomp, /* properties_provided */
10698 0, /* properties_destroyed */
10699 0, /* todo_flags_start */
10700 0, /* todo_flags_finish */
10703 class pass_lower_omp : public gimple_opt_pass
10705 public:
10706 pass_lower_omp (gcc::context *ctxt)
10707 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10710 /* opt_pass methods: */
10711 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10713 }; // class pass_lower_omp
10715 } // anon namespace
10717 gimple_opt_pass *
10718 make_pass_lower_omp (gcc::context *ctxt)
10720 return new pass_lower_omp (ctxt);
10723 /* The following is a utility to diagnose OpenMP structured block violations.
10724 It is not part of the "omplower" pass, as that's invoked too late. It
10725 should be invoked by the respective front ends after gimplification. */
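/* For example, with

	#pragma omp single
	{
	  ...
	  goto out;	-> branch_ctx is the GIMPLE_OMP_SINGLE
	}
      out:;		-> label_ctx is NULL

   diagnose_sb_0 below reports "invalid branch to/from an OpenMP
   structured block" and replaces the offending branch with a nop.  */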
10727 static splay_tree all_labels;
10729 /* Check for mismatched contexts and generate an error if needed. Return
10730 true if an error is detected. */
10732 static bool
10733 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10734 gimple branch_ctx, gimple label_ctx)
10736 if (label_ctx == branch_ctx)
10737 return false;
10741 Previously we kept track of the label's entire context in diagnose_sb_[12]
10742 so we could traverse it and issue a correct "exit" or "enter" error
10743 message upon a structured block violation.
10745 We built the context by building a list with tree_cons'ing, but there is
10746 no easy counterpart in gimple tuples. It seems like far too much work
10747 for issuing exit/enter error messages. If someone really misses the
10748 distinct error message... patches welcome.
10751 #if 0
10752 /* Try to avoid confusing the user by producing an error message
10753 with correct "exit" or "enter" verbiage. We prefer "exit"
10754 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10755 if (branch_ctx == NULL)
10756 exit_p = false;
10757 else
10759 while (label_ctx)
10761 if (TREE_VALUE (label_ctx) == branch_ctx)
10763 exit_p = false;
10764 break;
10766 label_ctx = TREE_CHAIN (label_ctx);
10770 if (exit_p)
10771 error ("invalid exit from OpenMP structured block");
10772 else
10773 error ("invalid entry to OpenMP structured block");
10774 #endif
10776 bool cilkplus_block = false;
10777 if (flag_cilkplus)
10779 if ((branch_ctx
10780 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10781 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10782 || (label_ctx
10783 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10784 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10785 cilkplus_block = true;
10788 /* If it's obvious we have an invalid entry, be specific about the error. */
10789 if (branch_ctx == NULL)
10791 if (cilkplus_block)
10792 error ("invalid entry to Cilk Plus structured block");
10793 else
10794 error ("invalid entry to OpenMP structured block");
10796 else
10798 /* Otherwise, be vague and lazy, but efficient. */
10799 if (cilkplus_block)
10800 error ("invalid branch to/from a Cilk Plus structured block");
10801 else
10802 error ("invalid branch to/from an OpenMP structured block");
10805 gsi_replace (gsi_p, gimple_build_nop (), false);
10806 return true;
10809 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10810 where each label is found. */
10812 static tree
10813 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10814 struct walk_stmt_info *wi)
10816 gimple context = (gimple) wi->info;
10817 gimple inner_context;
10818 gimple stmt = gsi_stmt (*gsi_p);
10820 *handled_ops_p = true;
10822 switch (gimple_code (stmt))
10824 WALK_SUBSTMTS;
10826 case GIMPLE_OMP_PARALLEL:
10827 case GIMPLE_OMP_TASK:
10828 case GIMPLE_OMP_SECTIONS:
10829 case GIMPLE_OMP_SINGLE:
10830 case GIMPLE_OMP_SECTION:
10831 case GIMPLE_OMP_MASTER:
10832 case GIMPLE_OMP_ORDERED:
10833 case GIMPLE_OMP_CRITICAL:
10834 case GIMPLE_OMP_TARGET:
10835 case GIMPLE_OMP_TEAMS:
10836 case GIMPLE_OMP_TASKGROUP:
10837 /* The minimal context here is just the current OMP construct. */
10838 inner_context = stmt;
10839 wi->info = inner_context;
10840 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10841 wi->info = context;
10842 break;
10844 case GIMPLE_OMP_FOR:
10845 inner_context = stmt;
10846 wi->info = inner_context;
10847 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10848 walk them. */
10849 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10850 diagnose_sb_1, NULL, wi);
10851 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10852 wi->info = context;
10853 break;
10855 case GIMPLE_LABEL:
10856 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
10857 (splay_tree_value) context);
10858 break;
10860 default:
10861 break;
10864 return NULL_TREE;
10867 /* Pass 2: Check each branch and see if its context differs from that of
10868 the destination label's context. */
10870 static tree
10871 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10872 struct walk_stmt_info *wi)
10874 gimple context = (gimple) wi->info;
10875 splay_tree_node n;
10876 gimple stmt = gsi_stmt (*gsi_p);
10878 *handled_ops_p = true;
10880 switch (gimple_code (stmt))
10882 WALK_SUBSTMTS;
10884 case GIMPLE_OMP_PARALLEL:
10885 case GIMPLE_OMP_TASK:
10886 case GIMPLE_OMP_SECTIONS:
10887 case GIMPLE_OMP_SINGLE:
10888 case GIMPLE_OMP_SECTION:
10889 case GIMPLE_OMP_MASTER:
10890 case GIMPLE_OMP_ORDERED:
10891 case GIMPLE_OMP_CRITICAL:
10892 case GIMPLE_OMP_TARGET:
10893 case GIMPLE_OMP_TEAMS:
10894 case GIMPLE_OMP_TASKGROUP:
10895 wi->info = stmt;
10896 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10897 wi->info = context;
10898 break;
10900 case GIMPLE_OMP_FOR:
10901 wi->info = stmt;
10902 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10903 walk them. */
10904 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10905 diagnose_sb_2, NULL, wi);
10906 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10907 wi->info = context;
10908 break;
10910 case GIMPLE_COND:
10912 tree lab = gimple_cond_true_label (stmt);
10913 if (lab)
10915 n = splay_tree_lookup (all_labels,
10916 (splay_tree_key) lab);
10917 diagnose_sb_0 (gsi_p, context,
10918 n ? (gimple) n->value : NULL);
10920 lab = gimple_cond_false_label (stmt);
10921 if (lab)
10923 n = splay_tree_lookup (all_labels,
10924 (splay_tree_key) lab);
10925 diagnose_sb_0 (gsi_p, context,
10926 n ? (gimple) n->value : NULL);
10929 break;
10931 case GIMPLE_GOTO:
10933 tree lab = gimple_goto_dest (stmt);
10934 if (TREE_CODE (lab) != LABEL_DECL)
10935 break;
10937 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10938 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10940 break;
10942 case GIMPLE_SWITCH:
10944 gimple_switch switch_stmt = as_a <gimple_switch> (stmt);
10945 unsigned int i;
10946 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
10948 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
10949 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10950 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10951 break;
10954 break;
10956 case GIMPLE_RETURN:
10957 diagnose_sb_0 (gsi_p, context, NULL);
10958 break;
10960 default:
10961 break;
10964 return NULL_TREE;
10967 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10968 codes. */
10969 bool
10970 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
10971 int *region_idx)
10973 gimple last = last_stmt (bb);
10974 enum gimple_code code = gimple_code (last);
10975 struct omp_region *cur_region = *region;
10976 bool fallthru = false;
10978 switch (code)
10980 case GIMPLE_OMP_PARALLEL:
10981 case GIMPLE_OMP_TASK:
10982 case GIMPLE_OMP_FOR:
10983 case GIMPLE_OMP_SINGLE:
10984 case GIMPLE_OMP_TEAMS:
10985 case GIMPLE_OMP_MASTER:
10986 case GIMPLE_OMP_TASKGROUP:
10987 case GIMPLE_OMP_ORDERED:
10988 case GIMPLE_OMP_CRITICAL:
10989 case GIMPLE_OMP_SECTION:
10990 cur_region = new_omp_region (bb, code, cur_region);
10991 fallthru = true;
10992 break;
10994 case GIMPLE_OMP_TARGET:
10995 cur_region = new_omp_region (bb, code, cur_region);
10996 fallthru = true;
10997 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
10998 cur_region = cur_region->outer;
10999 break;
11001 case GIMPLE_OMP_SECTIONS:
11002 cur_region = new_omp_region (bb, code, cur_region);
11003 fallthru = true;
11004 break;
11006 case GIMPLE_OMP_SECTIONS_SWITCH:
11007 fallthru = false;
11008 break;
11010 case GIMPLE_OMP_ATOMIC_LOAD:
11011 case GIMPLE_OMP_ATOMIC_STORE:
11012 fallthru = true;
11013 break;
11015 case GIMPLE_OMP_RETURN:
11016 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
11017 somewhere other than the next block. This will be
11018 created later. */
11019 cur_region->exit = bb;
11020 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
11021 cur_region = cur_region->outer;
11022 break;
11024 case GIMPLE_OMP_CONTINUE:
11025 cur_region->cont = bb;
11026 switch (cur_region->type)
11028 case GIMPLE_OMP_FOR:
11029 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
11030 succs edges as abnormal to prevent splitting
11031 them. */
11032 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
11033 /* Make the loopback edge. */
11034 make_edge (bb, single_succ (cur_region->entry),
11035 EDGE_ABNORMAL);
11037 /* Create an edge from GIMPLE_OMP_FOR to exit, which
11038 corresponds to the case that the body of the loop
11039 is not executed at all. */
11040 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
11041 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
11042 fallthru = false;
11043 break;
11045 case GIMPLE_OMP_SECTIONS:
11046 /* Wire up the edges into and out of the nested sections. */
11048 basic_block switch_bb = single_succ (cur_region->entry);
11050 struct omp_region *i;
11051 for (i = cur_region->inner; i ; i = i->next)
11053 gcc_assert (i->type == GIMPLE_OMP_SECTION);
11054 make_edge (switch_bb, i->entry, 0);
11055 make_edge (i->exit, bb, EDGE_FALLTHRU);
11058 /* Make the loopback edge to the block with
11059 GIMPLE_OMP_SECTIONS_SWITCH. */
11060 make_edge (bb, switch_bb, 0);
11062 /* Make the edge from the switch to exit. */
11063 make_edge (switch_bb, bb->next_bb, 0);
11064 fallthru = false;
11066 break;
11068 default:
11069 gcc_unreachable ();
11071 break;
11073 default:
11074 gcc_unreachable ();
11077 if (*region != cur_region)
11079 *region = cur_region;
11080 if (cur_region)
11081 *region_idx = cur_region->entry->index;
11082 else
11083 *region_idx = 0;
11086 return fallthru;
11089 static unsigned int
11090 diagnose_omp_structured_block_errors (void)
11092 struct walk_stmt_info wi;
11093 gimple_seq body = gimple_body (current_function_decl);
11095 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
11097 memset (&wi, 0, sizeof (wi));
11098 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
11100 memset (&wi, 0, sizeof (wi));
11101 wi.want_locations = true;
11102 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
11104 gimple_set_body (current_function_decl, body);
11106 splay_tree_delete (all_labels);
11107 all_labels = NULL;
11109 return 0;
11112 namespace {
11114 const pass_data pass_data_diagnose_omp_blocks =
11116 GIMPLE_PASS, /* type */
11117 "*diagnose_omp_blocks", /* name */
11118 OPTGROUP_NONE, /* optinfo_flags */
11119 TV_NONE, /* tv_id */
11120 PROP_gimple_any, /* properties_required */
11121 0, /* properties_provided */
11122 0, /* properties_destroyed */
11123 0, /* todo_flags_start */
11124 0, /* todo_flags_finish */
11127 class pass_diagnose_omp_blocks : public gimple_opt_pass
11129 public:
11130 pass_diagnose_omp_blocks (gcc::context *ctxt)
11131 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
11134 /* opt_pass methods: */
11135 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
11136 virtual unsigned int execute (function *)
11138 return diagnose_omp_structured_block_errors ();
11141 }; // class pass_diagnose_omp_blocks
11143 } // anon namespace
11145 gimple_opt_pass *
11146 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
11148 return new pass_diagnose_omp_blocks (ctxt);
11151 /* SIMD clone supporting code. */
11153 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
11154 of arguments to reserve space for. */
11156 static struct cgraph_simd_clone *
11157 simd_clone_struct_alloc (int nargs)
11158 {
11159   struct cgraph_simd_clone *clone_info;
11160   size_t len = (sizeof (struct cgraph_simd_clone)
11161 		+ nargs * sizeof (struct cgraph_simd_clone_arg));
11162   clone_info = (struct cgraph_simd_clone *)
11163 	       ggc_internal_cleared_alloc (len);
11164   return clone_info;
11165 }
11166
11167 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
11169 static inline void
11170 simd_clone_struct_copy (struct cgraph_simd_clone *to,
11171 			struct cgraph_simd_clone *from)
11172 {
11173   memcpy (to, from, (sizeof (struct cgraph_simd_clone)
11174 		     + ((from->nargs - from->inbranch)
11175 			* sizeof (struct cgraph_simd_clone_arg))));
11176 }
11177
11178 /* Return a vector of the parameter types of function FNDECL.  This uses
11179    TYPE_ARG_TYPES if available, otherwise falls back to the types of the
11180    DECL_ARGUMENTS entries.  */
11181
11182 vec<tree>
11183 simd_clone_vector_of_formal_parm_types (tree fndecl)
11184 {
11185   if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
11186     return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
11187   vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
11188   unsigned int i;
11189   tree arg;
11190   FOR_EACH_VEC_ELT (args, i, arg)
11191     args[i] = TREE_TYPE (args[i]);
11192   return args;
11193 }
11194
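/* E.g. for "int foo (float, double);" the TYPE_ARG_TYPES path yields
   the vector {float, double, void}: the trailing void_type_node comes
   from the terminating void_list_node, and callers such as
   simd_clone_clauses_extract skip it (illustrative example).  */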
11195 /* Given a simd function in NODE, extract the simd-specific
11196    information from the OMP clauses passed in CLAUSES, and return
11197    the struct cgraph_simd_clone * if it should be cloned.
11198    *INBRANCH_SPECIFIED is set to TRUE if an `inbranch' or `notinbranch'
11199    clause was specified, and to FALSE otherwise.  */
11200
11201 static struct cgraph_simd_clone *
11202 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
11203 bool *inbranch_specified)
11205 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
11206 tree t;
11207 int n;
11208 *inbranch_specified = false;
11210 n = args.length ();
11211 if (n > 0 && args.last () == void_type_node)
11212 n--;
11214   /* To distinguish them from OpenMP simd clones, Cilk Plus functions
11215      to be cloned carry a distinctive artificial "cilk simd function"
11216      attribute in addition to "omp declare simd".  */
11217 bool cilk_clone
11218 = (flag_cilkplus
11219 && lookup_attribute ("cilk simd function",
11220 DECL_ATTRIBUTES (node->decl)));
11222 /* Allocate one more than needed just in case this is an in-branch
11223 clone which will require a mask argument. */
11224 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
11225 clone_info->nargs = n;
11226 clone_info->cilk_elemental = cilk_clone;
11228 if (!clauses)
11230 args.release ();
11231 return clone_info;
11233 clauses = TREE_VALUE (clauses);
11234 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
11235 return clone_info;
11237 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
11239 switch (OMP_CLAUSE_CODE (t))
11241 case OMP_CLAUSE_INBRANCH:
11242 clone_info->inbranch = 1;
11243 *inbranch_specified = true;
11244 break;
11245 case OMP_CLAUSE_NOTINBRANCH:
11246 clone_info->inbranch = 0;
11247 *inbranch_specified = true;
11248 break;
11249 case OMP_CLAUSE_SIMDLEN:
11250 clone_info->simdlen
11251 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
11252 break;
11253 case OMP_CLAUSE_LINEAR:
11255 tree decl = OMP_CLAUSE_DECL (t);
11256 tree step = OMP_CLAUSE_LINEAR_STEP (t);
11257 int argno = TREE_INT_CST_LOW (decl);
11258 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
11260 clone_info->args[argno].arg_type
11261 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
11262 clone_info->args[argno].linear_step = tree_to_shwi (step);
11263 gcc_assert (clone_info->args[argno].linear_step >= 0
11264 && clone_info->args[argno].linear_step < n);
11266 else
11268 if (POINTER_TYPE_P (args[argno]))
11269 step = fold_convert (ssizetype, step);
11270 if (!tree_fits_shwi_p (step))
11272 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11273 "ignoring large linear step");
11274 args.release ();
11275 return NULL;
11277 else if (integer_zerop (step))
11279 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11280 "ignoring zero linear step");
11281 args.release ();
11282 return NULL;
11284 else
11286 clone_info->args[argno].arg_type
11287 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
11288 clone_info->args[argno].linear_step = tree_to_shwi (step);
11291 break;
11293 case OMP_CLAUSE_UNIFORM:
11295 tree decl = OMP_CLAUSE_DECL (t);
11296 int argno = tree_to_uhwi (decl);
11297 clone_info->args[argno].arg_type
11298 = SIMD_CLONE_ARG_TYPE_UNIFORM;
11299 break;
11301 case OMP_CLAUSE_ALIGNED:
11303 tree decl = OMP_CLAUSE_DECL (t);
11304 int argno = tree_to_uhwi (decl);
11305 clone_info->args[argno].alignment
11306 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
11307 break;
11309 default:
11310 break;
11313 args.release ();
11314 return clone_info;
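/* For illustration, a declaration such as

     #pragma omp declare simd simdlen(8) uniform(a) linear(b:1) aligned(c:32)
     int foo (int *a, int b, int *c);

   yields clone_info->simdlen == 8, args[0] marked UNIFORM, args[1]
   LINEAR_CONSTANT_STEP with linear_step 1 and args[2].alignment == 32;
   a sketch, relying on the clause decls being argument indexes in
   declaration order.  */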
11317 /* Given a SIMD clone in NODE, calculate the characteristic data
11318    type and return the corresponding type.  The characteristic data
11319    type is computed as described in the Intel Vector ABI.  */
11320
11321 static tree
11322 simd_clone_compute_base_data_type (struct cgraph_node *node,
11323 struct cgraph_simd_clone *clone_info)
11325 tree type = integer_type_node;
11326 tree fndecl = node->decl;
11328 /* a) For non-void function, the characteristic data type is the
11329 return type. */
11330 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
11331 type = TREE_TYPE (TREE_TYPE (fndecl));
11333 /* b) If the function has any non-uniform, non-linear parameters,
11334 then the characteristic data type is the type of the first
11335 such parameter. */
11336 else
11338 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
11339 for (unsigned int i = 0; i < clone_info->nargs; ++i)
11340 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
11342 type = map[i];
11343 break;
11345 map.release ();
11348 /* c) If the characteristic data type determined by a) or b) above
11349 is struct, union, or class type which is pass-by-value (except
11350 for the type that maps to the built-in complex data type), the
11351 characteristic data type is int. */
11352 if (RECORD_OR_UNION_TYPE_P (type)
11353 && !aggregate_value_p (type, NULL)
11354 && TREE_CODE (type) != COMPLEX_TYPE)
11355 return integer_type_node;
11357 /* d) If none of the above three classes is applicable, the
11358 characteristic data type is int. */
11360 return type;
11362 /* e) For Intel Xeon Phi native and offload compilation, if the
11363 resulting characteristic data type is 8-bit or 16-bit integer
11364 data type, the characteristic data type is int. */
11365 /* Well, we don't handle Xeon Phi yet. */
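/* A worked example of the rules above (illustrative): for
     #pragma omp declare simd
     float foo (float x, int n);
   rule a) picks the return type float; for a void function whose only
   non-uniform, non-linear parameter has type double, rule b) picks
   double; and a small pass-by-value struct falls through to int by
   rule c).  */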
11368 static tree
11369 simd_clone_mangle (struct cgraph_node *node,
11370 struct cgraph_simd_clone *clone_info)
11372 char vecsize_mangle = clone_info->vecsize_mangle;
11373 char mask = clone_info->inbranch ? 'M' : 'N';
11374 unsigned int simdlen = clone_info->simdlen;
11375 unsigned int n;
11376 pretty_printer pp;
11378 gcc_assert (vecsize_mangle && simdlen);
11380 pp_string (&pp, "_ZGV");
11381 pp_character (&pp, vecsize_mangle);
11382 pp_character (&pp, mask);
11383 pp_decimal_int (&pp, simdlen);
11385 for (n = 0; n < clone_info->nargs; ++n)
11387 struct cgraph_simd_clone_arg arg = clone_info->args[n];
11389 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
11390 pp_character (&pp, 'u');
11391 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
11393 gcc_assert (arg.linear_step != 0);
11394 pp_character (&pp, 'l');
11395 if (arg.linear_step > 1)
11396 pp_unsigned_wide_integer (&pp, arg.linear_step);
11397 else if (arg.linear_step < 0)
11399 pp_character (&pp, 'n');
11400 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
11401 arg.linear_step));
11404 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
11406 pp_character (&pp, 's');
11407 pp_unsigned_wide_integer (&pp, arg.linear_step);
11409 else
11410 pp_character (&pp, 'v');
11411 if (arg.alignment)
11413 pp_character (&pp, 'a');
11414 pp_decimal_int (&pp, arg.alignment);
11418 pp_underscore (&pp);
11419 pp_string (&pp,
11420 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
11421 const char *str = pp_formatted_text (&pp);
11423 /* If there already is a SIMD clone with the same mangled name, don't
11424 add another one. This can happen e.g. for
11425 #pragma omp declare simd
11426 #pragma omp declare simd simdlen(8)
11427 int foo (int, int);
11428 if the simdlen is assumed to be 8 for the first one, etc. */
11429 for (struct cgraph_node *clone = node->simd_clones; clone;
11430 clone = clone->simdclone->next_clone)
11431 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
11432 str) == 0)
11433 return NULL_TREE;
11435 return get_identifier (str);
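/* Example of the resulting mangling (a sketch; 'b' is one ISA letter
   the x86 target hook may pick): an unmasked simdlen-4 clone of
     #pragma omp declare simd notinbranch uniform(n)
     int foo (int x, int n);
   is named "_ZGVbN4vu_foo" -- "_ZGV", the ISA letter, 'N' for
   notinbranch, the simdlen, then 'v' for the vector argument and 'u'
   for the uniform one, followed by '_' and the assembler name.  */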
11438 /* Create a simd clone of OLD_NODE and return it. */
11440 static struct cgraph_node *
11441 simd_clone_create (struct cgraph_node *old_node)
11443 struct cgraph_node *new_node;
11444 if (old_node->definition)
11446 if (!old_node->has_gimple_body_p ())
11447 return NULL;
11448 old_node->get_body ();
11449 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
11450 false, NULL, NULL,
11451 "simdclone");
11453 else
11455 tree old_decl = old_node->decl;
11456 tree new_decl = copy_node (old_node->decl);
11457 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
11458 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
11459 SET_DECL_RTL (new_decl, NULL);
11460 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
11461 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
11462 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
11463 symtab->call_cgraph_insertion_hooks (new_node);
11465 if (new_node == NULL)
11466 return new_node;
11468 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
11470   /* The function cgraph_function_versioning () will force the new
11471      symbol local.  Undo this, and inherit external visibility from
11472      the old node.  */
11473 new_node->local.local = old_node->local.local;
11474 new_node->externally_visible = old_node->externally_visible;
11476 return new_node;
11479 /* Adjust the return type of the given function to its appropriate
11480 vector counterpart. Returns a simd array to be used throughout the
11481 function as a return value. */
11483 static tree
11484 simd_clone_adjust_return_type (struct cgraph_node *node)
11486 tree fndecl = node->decl;
11487 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
11488 unsigned int veclen;
11489 tree t;
11491 /* Adjust the function return type. */
11492 if (orig_rettype == void_type_node)
11493 return NULL_TREE;
11494 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
11495 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
11496 || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
11497 veclen = node->simdclone->vecsize_int;
11498 else
11499 veclen = node->simdclone->vecsize_float;
11500 veclen /= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))));
11501 if (veclen > node->simdclone->simdlen)
11502 veclen = node->simdclone->simdlen;
11503 if (veclen == node->simdclone->simdlen)
11504 TREE_TYPE (TREE_TYPE (fndecl))
11505 = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)),
11506 node->simdclone->simdlen);
11507 else
11509 t = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)), veclen);
11510 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
11511 TREE_TYPE (TREE_TYPE (fndecl)) = t;
11513 if (!node->definition)
11514 return NULL_TREE;
11516 t = DECL_RESULT (fndecl);
11517 /* Adjust the DECL_RESULT. */
11518 gcc_assert (TREE_TYPE (t) != void_type_node);
11519 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
11520 relayout_decl (t);
11522 tree atype = build_array_type_nelts (orig_rettype,
11523 node->simdclone->simdlen);
11524 if (veclen != node->simdclone->simdlen)
11525 return build1 (VIEW_CONVERT_EXPR, atype, t);
11527 /* Set up a SIMD array to use as the return value. */
11528 tree retval = create_tmp_var_raw (atype, "retval");
11529 gimple_add_tmp_var (retval);
11530 return retval;
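/* Numeric illustration (made-up vector sizes): with simdlen 8 and a
   float return value, a 256-bit vecsize_float gives veclen 8, so the
   return type becomes a single 8-element float vector; with a 128-bit
   vecsize_float, veclen is 4 and the return type becomes an array of
   two 4-element vectors, read out through the "retval" simd array.  */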
11533 /* Each vector argument has a corresponding array to be used locally
11534    as part of the eventual loop.  Create such a temporary array and
11535    return it.
11536
11537    PREFIX is the prefix to be used for the temporary.
11538
11539    TYPE is the inner element type.
11540
11541    SIMDLEN is the number of elements.  */
11542
11543 static tree
11544 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
11545 {
11546   tree atype = build_array_type_nelts (type, simdlen);
11547   tree avar = create_tmp_var_raw (atype, prefix);
11548   gimple_add_tmp_var (avar);
11549   return avar;
11550 }
11551
11552 /* Modify the function argument types to their corresponding vector
11553 counterparts if appropriate. Also, create one array for each simd
11554 argument to be used locally when using the function arguments as
11555 part of the loop.
11557 NODE is the function whose arguments are to be adjusted.
11559 Returns an adjustment vector that will be filled describing how the
11560 argument types will be adjusted. */
11562 static ipa_parm_adjustment_vec
11563 simd_clone_adjust_argument_types (struct cgraph_node *node)
11565 vec<tree> args;
11566 ipa_parm_adjustment_vec adjustments;
11568 if (node->definition)
11569 args = ipa_get_vector_of_formal_parms (node->decl);
11570 else
11571 args = simd_clone_vector_of_formal_parm_types (node->decl);
11572 adjustments.create (args.length ());
11573 unsigned i, j, veclen;
11574 struct ipa_parm_adjustment adj;
11575 for (i = 0; i < node->simdclone->nargs; ++i)
11577 memset (&adj, 0, sizeof (adj));
11578 tree parm = args[i];
11579 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
11580 adj.base_index = i;
11581 adj.base = parm;
11583 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
11584 node->simdclone->args[i].orig_type = parm_type;
11586 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
11588 /* No adjustment necessary for scalar arguments. */
11589 adj.op = IPA_PARM_OP_COPY;
11591 else
11593 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
11594 veclen = node->simdclone->vecsize_int;
11595 else
11596 veclen = node->simdclone->vecsize_float;
11597 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
11598 if (veclen > node->simdclone->simdlen)
11599 veclen = node->simdclone->simdlen;
11600 adj.arg_prefix = "simd";
11601 adj.type = build_vector_type (parm_type, veclen);
11602 node->simdclone->args[i].vector_type = adj.type;
11603 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11605 adjustments.safe_push (adj);
11606 if (j == veclen)
11608 memset (&adj, 0, sizeof (adj));
11609 adj.op = IPA_PARM_OP_NEW;
11610 adj.arg_prefix = "simd";
11611 adj.base_index = i;
11612 adj.type = node->simdclone->args[i].vector_type;
11616 if (node->definition)
11617 node->simdclone->args[i].simd_array
11618 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
11619 parm_type, node->simdclone->simdlen);
11621 adjustments.safe_push (adj);
11624 if (node->simdclone->inbranch)
11626 tree base_type
11627 = simd_clone_compute_base_data_type (node->simdclone->origin,
11628 node->simdclone);
11630 memset (&adj, 0, sizeof (adj));
11631 adj.op = IPA_PARM_OP_NEW;
11632 adj.arg_prefix = "mask";
11634 adj.base_index = i;
11635 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
11636 veclen = node->simdclone->vecsize_int;
11637 else
11638 veclen = node->simdclone->vecsize_float;
11639 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
11640 if (veclen > node->simdclone->simdlen)
11641 veclen = node->simdclone->simdlen;
11642 adj.type = build_vector_type (base_type, veclen);
11643 adjustments.safe_push (adj);
11645 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11646 adjustments.safe_push (adj);
11648 /* We have previously allocated one extra entry for the mask. Use
11649 it and fill it. */
11650 struct cgraph_simd_clone *sc = node->simdclone;
11651 sc->nargs++;
11652 if (node->definition)
11654 sc->args[i].orig_arg
11655 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
11656 sc->args[i].simd_array
11657 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
11659 sc->args[i].orig_type = base_type;
11660 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
11663 if (node->definition)
11664 ipa_modify_formal_parameters (node->decl, adjustments);
11665 else
11667 tree new_arg_types = NULL_TREE, new_reversed;
11668 bool last_parm_void = false;
11669 if (args.length () > 0 && args.last () == void_type_node)
11670 last_parm_void = true;
11672 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
11673 j = adjustments.length ();
11674 for (i = 0; i < j; i++)
11676 struct ipa_parm_adjustment *adj = &adjustments[i];
11677 tree ptype;
11678 if (adj->op == IPA_PARM_OP_COPY)
11679 ptype = args[adj->base_index];
11680 else
11681 ptype = adj->type;
11682 new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
11684 new_reversed = nreverse (new_arg_types);
11685 if (last_parm_void)
11687 if (new_reversed)
11688 TREE_CHAIN (new_arg_types) = void_list_node;
11689 else
11690 new_reversed = void_list_node;
11693 tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
11694 TYPE_ARG_TYPES (new_type) = new_reversed;
11695 TREE_TYPE (node->decl) = new_type;
11697 adjustments.release ();
11699 args.release ();
11700 return adjustments;
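/* Illustration of the splitting above (made-up numbers): for simdlen 8,
   vecsize_int 128 and an "int x" vector argument, veclen is 4, so the
   parameter is rewritten into two 4-element vector parameters -- the
   first adjustment entry plus one IPA_PARM_OP_NEW entry -- and, for a
   definition, a local simd array of eight ints named after "x" is
   created to hold all the lanes.  */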
11703 /* Initialize and copy the function arguments in NODE to their
11704 corresponding local simd arrays. Returns a fresh gimple_seq with
11705 the instruction sequence generated. */
11707 static gimple_seq
11708 simd_clone_init_simd_arrays (struct cgraph_node *node,
11709 ipa_parm_adjustment_vec adjustments)
11711 gimple_seq seq = NULL;
11712 unsigned i = 0, j = 0, k;
11714 for (tree arg = DECL_ARGUMENTS (node->decl);
11715 arg;
11716 arg = DECL_CHAIN (arg), i++, j++)
11718 if (adjustments[j].op == IPA_PARM_OP_COPY)
11719 continue;
11721 node->simdclone->args[i].vector_arg = arg;
11723 tree array = node->simdclone->args[i].simd_array;
11724 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
11726 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11727 tree ptr = build_fold_addr_expr (array);
11728 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11729 build_int_cst (ptype, 0));
11730 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11731 gimplify_and_add (t, &seq);
11733 else
11735 unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
11736 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11737 for (k = 0; k < node->simdclone->simdlen; k += simdlen)
11739 tree ptr = build_fold_addr_expr (array);
11740 int elemsize;
11741 if (k)
11743 arg = DECL_CHAIN (arg);
11744 j++;
11746 elemsize
11747 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
11748 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11749 build_int_cst (ptype, k * elemsize));
11750 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11751 gimplify_and_add (t, &seq);
11755 return seq;
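/* The sequence built here amounts to, per vector argument
   (schematically):

       *(vectype *) &array[0] = arg;

   with one such store per part, at byte offset k * elemsize, when the
   argument was split over several vector parameters.  */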
11758 /* Callback info for ipa_simd_modify_stmt_ops below. */
11760 struct modify_stmt_info {
11761   ipa_parm_adjustment_vec adjustments;
11762   gimple stmt;
11763   /* True if the parent statement was modified by
11764      ipa_simd_modify_stmt_ops.  */
11765   bool modified;
11766 };
11767
11768 /* Callback for walk_gimple_op.
11770 Adjust operands from a given statement as specified in the
11771 adjustments vector in the callback data. */
11773 static tree
11774 ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
11776 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
11777 struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
11778 tree *orig_tp = tp;
11779 if (TREE_CODE (*tp) == ADDR_EXPR)
11780 tp = &TREE_OPERAND (*tp, 0);
11781 struct ipa_parm_adjustment *cand = NULL;
11782 if (TREE_CODE (*tp) == PARM_DECL)
11783 cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
11784 else
11786 if (TYPE_P (*tp))
11787 *walk_subtrees = 0;
11790 tree repl = NULL_TREE;
11791 if (cand)
11792 repl = unshare_expr (cand->new_decl);
11793 else
11795 if (tp != orig_tp)
11797 *walk_subtrees = 0;
11798 bool modified = info->modified;
11799 info->modified = false;
11800 walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
11801 if (!info->modified)
11803 info->modified = modified;
11804 return NULL_TREE;
11806 info->modified = modified;
11807 repl = *tp;
11809 else
11810 return NULL_TREE;
11813 if (tp != orig_tp)
11815 repl = build_fold_addr_expr (repl);
11816 gimple stmt;
11817 if (is_gimple_debug (info->stmt))
11819 tree vexpr = make_node (DEBUG_EXPR_DECL);
11820 stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
11821 DECL_ARTIFICIAL (vexpr) = 1;
11822 TREE_TYPE (vexpr) = TREE_TYPE (repl);
11823 DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
11824 repl = vexpr;
11826 else
11828 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl),
11829 NULL), repl);
11830 repl = gimple_assign_lhs (stmt);
11832 gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
11833 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11834 *orig_tp = repl;
11836 else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
11838 tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
11839 *tp = vce;
11841 else
11842 *tp = repl;
11844 info->modified = true;
11845 return NULL_TREE;
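/* E.g. a remaining direct use of PARM_DECL x is replaced by the array
   reference x.simd[iter]; if the use sat behind an ADDR_EXPR, the code
   above instead materializes something like
       addr_3 = &x.simd[iter];
   right before the statement and substitutes addr_3 (names here are
   illustrative).  */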
11848 /* Traverse the function body and perform all modifications as
11849 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
11850 modified such that the replacement/reduction value will now be an
11851 offset into the corresponding simd_array.
11853    This function will replace all function argument uses with their
11854    corresponding simd array elements, and adjust the return values
11855    accordingly.  */
11857 static void
11858 ipa_simd_modify_function_body (struct cgraph_node *node,
11859 ipa_parm_adjustment_vec adjustments,
11860 tree retval_array, tree iter)
11862 basic_block bb;
11863 unsigned int i, j, l;
11865   /* Re-use the adjustments array, but this time use it to replace
11866      every function argument use with an offset into the corresponding
11867      simd_array.  */
11868 for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
11870 if (!node->simdclone->args[i].vector_arg)
11871 continue;
11873 tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
11874 tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
11875 adjustments[j].new_decl
11876 = build4 (ARRAY_REF,
11877 basetype,
11878 node->simdclone->args[i].simd_array,
11879 iter,
11880 NULL_TREE, NULL_TREE);
11881 if (adjustments[j].op == IPA_PARM_OP_NONE
11882 && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
11883 j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
11886 l = adjustments.length ();
11887 for (i = 1; i < num_ssa_names; i++)
11889 tree name = ssa_name (i);
11890 if (name
11891 && SSA_NAME_VAR (name)
11892 && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
11894 for (j = 0; j < l; j++)
11895 if (SSA_NAME_VAR (name) == adjustments[j].base
11896 && adjustments[j].new_decl)
11898 tree base_var;
11899 if (adjustments[j].new_ssa_base == NULL_TREE)
11901 base_var
11902 = copy_var_decl (adjustments[j].base,
11903 DECL_NAME (adjustments[j].base),
11904 TREE_TYPE (adjustments[j].base));
11905 adjustments[j].new_ssa_base = base_var;
11907 else
11908 base_var = adjustments[j].new_ssa_base;
11909 if (SSA_NAME_IS_DEFAULT_DEF (name))
11911 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11912 gimple_stmt_iterator gsi = gsi_after_labels (bb);
11913 tree new_decl = unshare_expr (adjustments[j].new_decl);
11914 set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
11915 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
11916 SSA_NAME_IS_DEFAULT_DEF (name) = 0;
11917 gimple stmt = gimple_build_assign (name, new_decl);
11918 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11920 else
11921 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
11926 struct modify_stmt_info info;
11927 info.adjustments = adjustments;
11929 FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
11931 gimple_stmt_iterator gsi;
11933 gsi = gsi_start_bb (bb);
11934 while (!gsi_end_p (gsi))
11936 gimple stmt = gsi_stmt (gsi);
11937 info.stmt = stmt;
11938 struct walk_stmt_info wi;
11940 memset (&wi, 0, sizeof (wi));
11941 info.modified = false;
11942 wi.info = &info;
11943 walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
11945 if (gimple_code (stmt) == GIMPLE_RETURN)
11947 tree retval = gimple_return_retval (stmt);
11948 if (!retval)
11950 gsi_remove (&gsi, true);
11951 continue;
11954 /* Replace `return foo' with `retval_array[iter] = foo'. */
11955 tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
11956 retval_array, iter, NULL, NULL);
11957 stmt = gimple_build_assign (ref, retval);
11958 gsi_replace (&gsi, stmt, true);
11959 info.modified = true;
11962 if (info.modified)
11964 update_stmt (stmt);
11965 if (maybe_clean_eh_stmt (stmt))
11966 gimple_purge_dead_eh_edges (gimple_bb (stmt));
11968 gsi_next (&gsi);
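/* After this walk over the body, "return foo;" has become
   "retval_array[iter] = foo;" and every use of a vector argument goes
   through its simd array, leaving a body that simd_clone_adjust below
   can wrap in the simdlen loop (summary of the transformations
   above).  */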
11973 /* Adjust the argument types in NODE to their appropriate vector
11974 counterparts. */
11976 static void
11977 simd_clone_adjust (struct cgraph_node *node)
11979 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
11981 targetm.simd_clone.adjust (node);
11983 tree retval = simd_clone_adjust_return_type (node);
11984 ipa_parm_adjustment_vec adjustments
11985 = simd_clone_adjust_argument_types (node);
11987 push_gimplify_context ();
11989 gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
11991 /* Adjust all uses of vector arguments accordingly. Adjust all
11992 return values accordingly. */
11993 tree iter = create_tmp_var (unsigned_type_node, "iter");
11994 tree iter1 = make_ssa_name (iter, NULL);
11995 tree iter2 = make_ssa_name (iter, NULL);
11996 ipa_simd_modify_function_body (node, adjustments, retval, iter1);
11998 /* Initialize the iteration variable. */
11999 basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12000 basic_block body_bb = split_block_after_labels (entry_bb)->dest;
12001 gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
12002 /* Insert the SIMD array and iv initialization at function
12003 entry. */
12004 gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
12006 pop_gimplify_context (NULL);
12008 /* Create a new BB right before the original exit BB, to hold the
12009 iteration increment and the condition/branch. */
12010 basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
12011 basic_block incr_bb = create_empty_bb (orig_exit);
12012 add_bb_to_loop (incr_bb, body_bb->loop_father);
12013   /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with no
12014      edge flags set.  Set it now to be a FALLTHRU_EDGE.  */
12015 gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
12016 EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
12017 for (unsigned i = 0;
12018 i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
12020 edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
12021 redirect_edge_succ (e, incr_bb);
12023 edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
12024 e->probability = REG_BR_PROB_BASE;
12025 gsi = gsi_last_bb (incr_bb);
12026 gimple g = gimple_build_assign_with_ops (PLUS_EXPR, iter2, iter1,
12027 build_int_cst (unsigned_type_node,
12028 1));
12029 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12031 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
12032 struct loop *loop = alloc_loop ();
12033 cfun->has_force_vectorize_loops = true;
12034 loop->safelen = node->simdclone->simdlen;
12035 loop->force_vectorize = true;
12036 loop->header = body_bb;
12038 /* Branch around the body if the mask applies. */
12039 if (node->simdclone->inbranch)
12041 gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
12042 tree mask_array
12043 = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
12044 tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)), NULL);
12045 tree aref = build4 (ARRAY_REF,
12046 TREE_TYPE (TREE_TYPE (mask_array)),
12047 mask_array, iter1,
12048 NULL, NULL);
12049 g = gimple_build_assign (mask, aref);
12050 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12051 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
12052 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
12054 aref = build1 (VIEW_CONVERT_EXPR,
12055 build_nonstandard_integer_type (bitsize, 0), mask);
12056 mask = make_ssa_name (TREE_TYPE (aref), NULL);
12057 g = gimple_build_assign (mask, aref);
12058 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12061 g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
12062 NULL, NULL);
12063 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12064 make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
12065 FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
12068 /* Generate the condition. */
12069 g = gimple_build_cond (LT_EXPR,
12070 iter2,
12071 build_int_cst (unsigned_type_node,
12072 node->simdclone->simdlen),
12073 NULL, NULL);
12074 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12075 e = split_block (incr_bb, gsi_stmt (gsi));
12076 basic_block latch_bb = e->dest;
12077 basic_block new_exit_bb;
12078 new_exit_bb = split_block (latch_bb, NULL)->dest;
12079 loop->latch = latch_bb;
12081 redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
12083 make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
12084 /* The successor of incr_bb is already pointing to latch_bb; just
12085 change the flags.
12086 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
12087 FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
12089 gimple_phi phi = create_phi_node (iter1, body_bb);
12090 edge preheader_edge = find_edge (entry_bb, body_bb);
12091 edge latch_edge = single_succ_edge (latch_bb);
12092 add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
12093 UNKNOWN_LOCATION);
12094 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12096 /* Generate the new return. */
12097 gsi = gsi_last_bb (new_exit_bb);
12098 if (retval
12099 && TREE_CODE (retval) == VIEW_CONVERT_EXPR
12100 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
12101 retval = TREE_OPERAND (retval, 0);
12102 else if (retval)
12104 retval = build1 (VIEW_CONVERT_EXPR,
12105 TREE_TYPE (TREE_TYPE (node->decl)),
12106 retval);
12107 retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
12108 false, GSI_CONTINUE_LINKING);
12110 g = gimple_build_return (retval);
12111 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12113   /* Handle aligned clauses by replacing default defs of the aligned
12114      uniform args with the lhs of a __builtin_assume_aligned (arg_N(D),
12115      alignment) call.  Handle linear clauses by adding PHIs.  */
12116 for (unsigned i = 0; i < node->simdclone->nargs; i++)
12117 if (node->simdclone->args[i].alignment
12118 && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
12119 && (node->simdclone->args[i].alignment
12120 & (node->simdclone->args[i].alignment - 1)) == 0
12121 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
12122 == POINTER_TYPE)
12124 unsigned int alignment = node->simdclone->args[i].alignment;
12125 tree orig_arg = node->simdclone->args[i].orig_arg;
12126 tree def = ssa_default_def (cfun, orig_arg);
12127 if (def && !has_zero_uses (def))
12129 tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
12130 gimple_seq seq = NULL;
12131 bool need_cvt = false;
12132 gimple call
12133 = gimple_build_call (fn, 2, def, size_int (alignment));
12134 g = call;
12135 if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
12136 ptr_type_node))
12137 need_cvt = true;
12138 tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg, NULL);
12139 gimple_call_set_lhs (g, t);
12140 gimple_seq_add_stmt_without_update (&seq, g);
12141 if (need_cvt)
12143 t = make_ssa_name (orig_arg, NULL);
12144 g = gimple_build_assign_with_ops (NOP_EXPR, t,
12145 gimple_call_lhs (g),
12146 NULL_TREE);
12147 gimple_seq_add_stmt_without_update (&seq, g);
12149 gsi_insert_seq_on_edge_immediate
12150 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
12152 entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12153 int freq = compute_call_stmt_bb_frequency (current_function_decl,
12154 entry_bb);
12155 node->create_edge (cgraph_node::get_create (fn),
12156 call, entry_bb->count, freq);
12158 imm_use_iterator iter;
12159 use_operand_p use_p;
12160 gimple use_stmt;
12161 tree repl = gimple_get_lhs (g);
12162 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12163 if (is_gimple_debug (use_stmt) || use_stmt == call)
12164 continue;
12165 else
12166 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12167 SET_USE (use_p, repl);
12170 else if (node->simdclone->args[i].arg_type
12171 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
12173 tree orig_arg = node->simdclone->args[i].orig_arg;
12174 tree def = ssa_default_def (cfun, orig_arg);
12175 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12176 || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
12177 if (def && !has_zero_uses (def))
12179 iter1 = make_ssa_name (orig_arg, NULL);
12180 iter2 = make_ssa_name (orig_arg, NULL);
12181 phi = create_phi_node (iter1, body_bb);
12182 add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
12183 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12184 enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12185 ? PLUS_EXPR : POINTER_PLUS_EXPR;
12186 tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12187 ? TREE_TYPE (orig_arg) : sizetype;
12188 tree addcst
12189 = build_int_cst (addtype, node->simdclone->args[i].linear_step);
12190 g = gimple_build_assign_with_ops (code, iter2, iter1, addcst);
12191 gsi = gsi_last_bb (incr_bb);
12192 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
12194 imm_use_iterator iter;
12195 use_operand_p use_p;
12196 gimple use_stmt;
12197 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12198 if (use_stmt == phi)
12199 continue;
12200 else
12201 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12202 SET_USE (use_p, iter1);
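/* Illustratively: "aligned(p:32)" on a uniform pointer makes every use
   of p_1(D) flow through
       p_2 = __builtin_assume_aligned (p_1(D), 32);
   while "linear(i:1)" gives i a PHI in the loop header whose latch
   value is bumped by the linear step once per iteration (SSA names
   hypothetical).  */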
12206 calculate_dominance_info (CDI_DOMINATORS);
12207 add_loop (loop, loop->header->loop_father);
12208 update_ssa (TODO_update_ssa);
12210 pop_cfun ();
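/* Putting it all together, for
     #pragma omp declare simd notinbranch
     int foo (int x);
   with simdlen 4 the adjusted clone body has roughly this shape (a
   source-level sketch, not actual GIMPLE):

     V4SI foo.simdclone (V4SI x)
     {
       int x.simd[4], retval[4];
       *(V4SI *) &x.simd[0] = x;
       unsigned int iter;
       for (iter = 0; iter < 4; iter++)
	 retval[iter] = <original body with x replaced by x.simd[iter]>;
       return *(V4SI *) &retval[0];
     }

   annotated with safelen and force_vectorize so the vectorizer can
   collapse the loop into straight-line vector code.  */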
12213 /* If the function in NODE is tagged as an elemental SIMD function,
12214 create the appropriate SIMD clones. */
12216 static void
12217 expand_simd_clones (struct cgraph_node *node)
12219 tree attr = lookup_attribute ("omp declare simd",
12220 DECL_ATTRIBUTES (node->decl));
12221 if (attr == NULL_TREE
12222 || node->global.inlined_to
12223 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
12224 return;
12226   /* Ignore
12227        #pragma omp declare simd
12228        extern int foo ();
12229      in C, where we don't know the argument types at all.  */
12230 if (!node->definition
12231 && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
12232 return;
12236 /* Start with parsing the "omp declare simd" attribute(s). */
12237 bool inbranch_clause_specified;
12238 struct cgraph_simd_clone *clone_info
12239 = simd_clone_clauses_extract (node, TREE_VALUE (attr),
12240 &inbranch_clause_specified);
12241 if (clone_info == NULL)
12242 continue;
12244 int orig_simdlen = clone_info->simdlen;
12245 tree base_type = simd_clone_compute_base_data_type (node, clone_info);
12246 /* The target can return 0 (no simd clones should be created),
12247 1 (just one ISA of simd clones should be created) or higher
12248 count of ISA variants. In that case, clone_info is initialized
12249 for the first ISA variant. */
12250 int count
12251 = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
12252 base_type, 0);
12253 if (count == 0)
12254 continue;
12256 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
12257 also create one inbranch and one !inbranch clone of it. */
12258 for (int i = 0; i < count * 2; i++)
12260 struct cgraph_simd_clone *clone = clone_info;
12261 if (inbranch_clause_specified && (i & 1) != 0)
12262 continue;
12264 if (i != 0)
12266 clone = simd_clone_struct_alloc (clone_info->nargs
12267 + ((i & 1) != 0));
12268 simd_clone_struct_copy (clone, clone_info);
12269 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
12270 and simd_clone_adjust_argument_types did to the first
12271 clone's info. */
12272 clone->nargs -= clone_info->inbranch;
12273 clone->simdlen = orig_simdlen;
12274 /* And call the target hook again to get the right ISA. */
12275 targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
12276 base_type,
12277 i / 2);
12278 if ((i & 1) != 0)
12279 clone->inbranch = 1;
12282 /* simd_clone_mangle might fail if such a clone has been created
12283 already. */
12284 tree id = simd_clone_mangle (node, clone);
12285 if (id == NULL_TREE)
12286 continue;
12288       /* Only once we are sure we want to create the clone do we
12289 	 actually clone the function (or definition) or create another
12290 	 extern FUNCTION_DECL (for prototypes without definitions).  */
12291 struct cgraph_node *n = simd_clone_create (node);
12292 if (n == NULL)
12293 continue;
12295 n->simdclone = clone;
12296 clone->origin = node;
12297 clone->next_clone = NULL;
12298 if (node->simd_clones == NULL)
12300 clone->prev_clone = n;
12301 node->simd_clones = n;
12303 else
12305 clone->prev_clone = node->simd_clones->simdclone->prev_clone;
12306 clone->prev_clone->simdclone->next_clone = n;
12307 node->simd_clones->simdclone->prev_clone = n;
12309 symtab->change_decl_assembler_name (n->decl, id);
12310 /* And finally adjust the return type, parameters and for
12311 definitions also function body. */
12312 if (node->definition)
12313 simd_clone_adjust (n);
12314 else
12316 simd_clone_adjust_return_type (n);
12317 simd_clone_adjust_argument_types (n);
12321 while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
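/* For instance, a single "omp declare simd" attribute with no
   inbranch/notinbranch clause and a target hook returning count == 2
   makes the loop above create four clones: an unmasked and a masked
   variant for each of the two ISA levels (illustrative count).  */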
12324 /* Entry point for IPA simd clone creation pass. */
12326 static unsigned int
12327 ipa_omp_simd_clone (void)
12328 {
12329   struct cgraph_node *node;
12330   FOR_EACH_FUNCTION (node)
12331     expand_simd_clones (node);
12332   return 0;
12333 }
12334
12335 namespace {
12336
12337 const pass_data pass_data_omp_simd_clone =
12338 {
12339   SIMPLE_IPA_PASS, /* type */
12340   "simdclone", /* name */
12341   OPTGROUP_NONE, /* optinfo_flags */
12342   TV_NONE, /* tv_id */
12343   ( PROP_ssa | PROP_cfg ), /* properties_required */
12344   0, /* properties_provided */
12345   0, /* properties_destroyed */
12346   0, /* todo_flags_start */
12347   0, /* todo_flags_finish */
12348 };
12349
12350 class pass_omp_simd_clone : public simple_ipa_opt_pass
12351 {
12352 public:
12353   pass_omp_simd_clone (gcc::context *ctxt)
12354     : simple_ipa_opt_pass (pass_data_omp_simd_clone, ctxt)
12355   {}
12356
12357   /* opt_pass methods: */
12358   virtual bool gate (function *);
12359   virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
12360 }; // class pass_omp_simd_clone
12361
12362 bool
12363 pass_omp_simd_clone::gate (function *)
12364 {
12365   return ((flag_openmp || flag_openmp_simd
12366 	   || flag_cilkplus
12367 	   || (in_lto_p && !flag_wpa))
12368 	  && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
12369 }
12370
12371 } // anon namespace
12372
12373 simple_ipa_opt_pass *
12374 make_pass_omp_simd_clone (gcc::context *ctxt)
12375 {
12376   return new pass_omp_simd_clone (ctxt);
12377 }
12378
12379 #include "gt-omp-low.h"