Concretize gimple_call_set_fntype
[official-gcc.git] / gcc / omp-low.c
blob 993206f259576e49f62d231eee8fb15273a8bd33
/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "rtl.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
#include "cilk.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
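
/* For orientation, a hedged sketch of the eventual result (simplified;
   the exact decls and flags are produced by later expansion, not
   verbatim by this pass): a construct such as

	#pragma omp parallel shared (x)
	  x++;

   is outlined into a child function plus a libgomp call, roughly

	void main._omp_fn.0 (struct .omp_data_s *.omp_data_i)
	{
	  *.omp_data_i->x += 1;
	}

	.omp_data_o.x = &x;
	GOMP_parallel (main._omp_fn.0, &.omp_data_o, 0, 0);

   where GOMP_parallel is the libgomp entry point that spawns the team
   and invokes the child function in each thread.  */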
/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump during omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple_omp_for for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
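
/* Usage sketch (illustrative only): to test whether a parallel statement
   carries a num_threads clause, one would write

	tree c = find_omp_clause (gimple_omp_parallel_clauses (stmt),
				  OMP_CLAUSE_NUM_THREADS);

   and compare C against NULL_TREE, since the function returns NULL_TREE
   when no clause of the requested kind is present.  */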
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple_omp_for for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD
		      || (gimple_omp_for_kind (for_stmt)
			  == GF_OMP_FOR_KIND_CILKFOR));
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
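
/* Illustration of the canonicalization above (a sketch for a simple
   non-collapsed loop): the source loop

	#pragma omp for
	for (i = 0; i <= N; i++)

   arrives with cond_code LE_EXPR and n2 == N; extract_omp_for_data
   rewrites it to cond_code LT_EXPR with n2 == N + 1, so later expansion
   only has to deal with the LT/GT (and Cilk NE) forms.  For constant
   bounds, the iteration count is then computed, per the code above, as

	count = (step + (cond_code == LT_EXPR ? -1 : 1) + n2 - n1) / step;

   evaluated in the signed variant of the index type.  */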
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (as_a <gimple_omp_for> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_omp_for for_stmt = dyn_cast <gimple_omp_for> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
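
/* A hedged sketch of where these arguments end up (the exact libgomp
   entry point is chosen during expansion, not here): for a combined
   parallel loop with schedule (dynamic, 4), the ws_args vector (n1, n2,
   step, chunk) is appended to the outlined call, giving roughly

	GOMP_parallel_loop_dynamic (fn, &.omp_data_o, num_threads,
				    n1, n2, step, 4, flags);  */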
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
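
/* E.g. (a sketch): "#pragma omp parallel for schedule (dynamic)" with no
   ordered clause is marked combined here and later becomes a single
   combined library call such as GOMP_parallel_loop_dynamic, whereas
   schedule (static) keeps the separate GOMP_parallel call with the
   static loop open coded in the child function, per the comment
   above.  */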
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
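
/* For illustration (a sketch of the resulting marshalling layout): a
   non-addressable scalar "int y" shared in a parallel gets a value
   field,

	struct .omp_data_s { int y; ... };    (copy-in/copy-out)

   while an addressable "int x", or any aggregate, gets a pointer field,

	struct .omp_data_s { int *x; ... };   (one shared location)

   which is what the true/false answers above select between.  */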
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
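
/* A note on MASK, summarizing the uses in this file (no new semantics):
   bit 1 installs the field in record_type/field_map, bit 2 in
   srecord_type/sfield_map, and bit 4 (arrays only) gives the field a
   pointer-to-pointer type.  So install_var_field (decl, by_ref, 3, ctx)
   creates matching fields in both records when an srecord exists, and
   mask 7 is the variant used for zero-bias array sections in map
   clauses.  */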
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at BB inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple_omp_task task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple_bind bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node::add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gimple_omp_task> (ctx->stmt));

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
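
/* E.g. (a sketch): for a shared VLA "int a[n]", the receiver field has
   the variably modified type "int (*)[n]"; the rebuild above remaps the
   saved "n" to the child function's copy so that sizes computed in the
   child agree with the fields it reads through .omp_data_i.  */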
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  if (ctx->outer)
		    {
		      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		      decl = OMP_CLAUSE_DECL (c);
		    }
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
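
/* Worked example (a sketch): for "#pragma omp parallel firstprivate (x)"
   with a local scalar "int x", the first pass above reaches do_private,
   installs one field in both record types (mask 3) via install_var_field
   and a private copy via install_var_local; the second pass then calls
   fixup_remapped_decl to remap the copy's type and sizes.  A global "x"
   gets only the private local copy and no field, since the receiver side
   can read the original directly.  */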
/* Create a new name for omp child function.  Returns an identifier.  If
   IS_CILK_FOR is true then the suffix for the child function is
   "_cilk_for_fn".  */

static tree
create_omp_child_function_name (bool task_copy, bool is_cilk_for)
{
  if (is_cilk_for)
    return clone_function_name (current_function_decl, "_cilk_for_fn");
  return clone_function_name (current_function_decl,
			      task_copy ? "_omp_cpyfn" : "_omp_fn");
}

/* Returns the type of the induction variable for the child function for
   _Cilk_for and the types for _high and _low variables based on TYPE.  */

static tree
cilk_for_check_loop_diff_type (tree type)
{
  if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
    {
      if (TYPE_UNSIGNED (type))
	return uint32_type_node;
      else
	return integer_type_node;
    }
  else
    {
      if (TYPE_UNSIGNED (type))
	return uint64_type_node;
      else
	return long_long_integer_type_node;
    }
}
1881 /* Build a decl for the omp child function. It'll not contain a body
1882 yet, just the bare decl. */
1884 static void
1885 create_omp_child_function (omp_context *ctx, bool task_copy)
1887 tree decl, type, name, t;
1889 tree cilk_for_count
1890 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
1891 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1892 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
1893 tree cilk_var_type = NULL_TREE;
1895 name = create_omp_child_function_name (task_copy,
1896 cilk_for_count != NULL_TREE);
1897 if (task_copy)
1898 type = build_function_type_list (void_type_node, ptr_type_node,
1899 ptr_type_node, NULL_TREE);
1900 else if (cilk_for_count)
1902 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
1903 cilk_var_type = cilk_for_check_loop_diff_type (type);
1904 type = build_function_type_list (void_type_node, ptr_type_node,
1905 cilk_var_type, cilk_var_type, NULL_TREE);
1907 else
1908 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1910 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
1912 if (!task_copy)
1913 ctx->cb.dst_fn = decl;
1914 else
1915 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1917 TREE_STATIC (decl) = 1;
1918 TREE_USED (decl) = 1;
1919 DECL_ARTIFICIAL (decl) = 1;
1920 DECL_IGNORED_P (decl) = 0;
1921 TREE_PUBLIC (decl) = 0;
1922 DECL_UNINLINABLE (decl) = 1;
1923 DECL_EXTERNAL (decl) = 0;
1924 DECL_CONTEXT (decl) = NULL_TREE;
1925 DECL_INITIAL (decl) = make_node (BLOCK);
1926 bool target_p = false;
1927 if (lookup_attribute ("omp declare target",
1928 DECL_ATTRIBUTES (current_function_decl)))
1929 target_p = true;
1930 else
1932 omp_context *octx;
1933 for (octx = ctx; octx; octx = octx->outer)
1934 if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
1935 && gimple_omp_target_kind (octx->stmt)
1936 == GF_OMP_TARGET_KIND_REGION)
1938 target_p = true;
1939 break;
1942 if (target_p)
1943 DECL_ATTRIBUTES (decl)
1944 = tree_cons (get_identifier ("omp declare target"),
1945 NULL_TREE, DECL_ATTRIBUTES (decl));
1947 t = build_decl (DECL_SOURCE_LOCATION (decl),
1948 RESULT_DECL, NULL_TREE, void_type_node);
1949 DECL_ARTIFICIAL (t) = 1;
1950 DECL_IGNORED_P (t) = 1;
1951 DECL_CONTEXT (t) = decl;
1952 DECL_RESULT (decl) = t;
1954 /* _Cilk_for's child function requires two extra parameters called
1955 __low and __high that are set by the Cilk runtime when it calls this
1956 function. */
1957 if (cilk_for_count)
1959 t = build_decl (DECL_SOURCE_LOCATION (decl),
1960 PARM_DECL, get_identifier ("__high"), cilk_var_type);
1961 DECL_ARTIFICIAL (t) = 1;
1962 DECL_NAMELESS (t) = 1;
1963 DECL_ARG_TYPE (t) = ptr_type_node;
1964 DECL_CONTEXT (t) = current_function_decl;
1965 TREE_USED (t) = 1;
1966 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1967 DECL_ARGUMENTS (decl) = t;
1969 t = build_decl (DECL_SOURCE_LOCATION (decl),
1970 PARM_DECL, get_identifier ("__low"), cilk_var_type);
1971 DECL_ARTIFICIAL (t) = 1;
1972 DECL_NAMELESS (t) = 1;
1973 DECL_ARG_TYPE (t) = ptr_type_node;
1974 DECL_CONTEXT (t) = current_function_decl;
1975 TREE_USED (t) = 1;
1976 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1977 DECL_ARGUMENTS (decl) = t;
1980 tree data_name = get_identifier (".omp_data_i");
1981 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
1982 ptr_type_node);
1983 DECL_ARTIFICIAL (t) = 1;
1984 DECL_NAMELESS (t) = 1;
1985 DECL_ARG_TYPE (t) = ptr_type_node;
1986 DECL_CONTEXT (t) = current_function_decl;
1987 TREE_USED (t) = 1;
1988 if (cilk_for_count)
1989 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1990 DECL_ARGUMENTS (decl) = t;
1991 if (!task_copy)
1992 ctx->receiver_decl = t;
1993 else
1995 t = build_decl (DECL_SOURCE_LOCATION (decl),
1996 PARM_DECL, get_identifier (".omp_data_o"),
1997 ptr_type_node);
1998 DECL_ARTIFICIAL (t) = 1;
1999 DECL_NAMELESS (t) = 1;
2000 DECL_ARG_TYPE (t) = ptr_type_node;
2001 DECL_CONTEXT (t) = current_function_decl;
2002 TREE_USED (t) = 1;
2003 TREE_ADDRESSABLE (t) = 1;
2004 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2005 DECL_ARGUMENTS (decl) = t;
2008 /* Allocate memory for the function structure. The call to
2009 allocate_struct_function clobbers CFUN, so we need to restore
2010 it afterward. */
2011 push_struct_function (decl);
2012 cfun->function_end_locus = gimple_location (ctx->stmt);
2013 pop_cfun ();
2016 /* Callback for walk_gimple_seq. Check whether a combined parallel
2017 contains an OMP_FOR that has gimple_omp_for_combined_into_p set. */
2019 static tree
2020 find_combined_for (gimple_stmt_iterator *gsi_p,
2021 bool *handled_ops_p,
2022 struct walk_stmt_info *wi)
2024 gimple stmt = gsi_stmt (*gsi_p);
2026 *handled_ops_p = true;
2027 switch (gimple_code (stmt))
2029 WALK_SUBSTMTS;
2031 case GIMPLE_OMP_FOR:
2032 if (gimple_omp_for_combined_into_p (stmt)
2033 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2035 wi->info = stmt;
2036 return integer_zero_node;
2038 break;
2039 default:
2040 break;
2042 return NULL;
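/* E.g. for a combined construct such as

     #pragma omp parallel for
     for (i = 0; i < n; i++)
       ...

   the GIMPLE_OMP_PARALLEL body contains a GIMPLE_OMP_FOR with
   gimple_omp_for_combined_into_p set, and this callback returns it
   via WI->info.  */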
2045 /* Scan an OpenMP parallel directive. */
2047 static void
2048 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2050 omp_context *ctx;
2051 tree name;
2052 gimple_omp_parallel stmt = as_a <gimple_omp_parallel> (gsi_stmt (*gsi));
2054 /* Ignore parallel directives with empty bodies, unless there
2055 are copyin clauses. */
2056 if (optimize > 0
2057 && empty_body_p (gimple_omp_body (stmt))
2058 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2059 OMP_CLAUSE_COPYIN) == NULL)
2061 gsi_replace (gsi, gimple_build_nop (), false);
2062 return;
2065 if (gimple_omp_parallel_combined_p (stmt))
2067 struct walk_stmt_info wi;
2069 memset (&wi, 0, sizeof (wi));
2070 wi.val_only = true;
2071 walk_gimple_seq (gimple_omp_body (stmt),
2072 find_combined_for, NULL, &wi);
2073 if (wi.info)
2075 gimple_omp_for for_stmt = as_a <gimple_omp_for> ((gimple) wi.info);
2076 struct omp_for_data fd;
2077 extract_omp_for_data (for_stmt, &fd, NULL);
2078 /* We need two temporaries of fd.iter_type (istart/iend)
2079 and then (fd.collapse - 1) temporaries with the same
2080 type for the count2 ... countN-1 vars if they are not constant. */
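/* E.g. a combined parallel for with collapse(2) and a non-constant
   total iteration count gets 2 + (2 - 1) = 3 _looptemp_ clauses.  */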
2081 size_t count = 2, i;
2082 tree type = fd.iter_type;
2083 if (fd.collapse > 1
2084 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2085 count += fd.collapse - 1;
2086 for (i = 0; i < count; i++)
2088 tree temp = create_tmp_var (type, NULL);
2089 tree c = build_omp_clause (UNKNOWN_LOCATION,
2090 OMP_CLAUSE__LOOPTEMP_);
2091 insert_decl_map (&outer_ctx->cb, temp, temp);
2092 OMP_CLAUSE_DECL (c) = temp;
2093 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2094 gimple_omp_parallel_set_clauses (stmt, c);
2099 ctx = new_omp_context (stmt, outer_ctx);
2100 taskreg_contexts.safe_push (ctx);
2101 if (taskreg_nesting_level > 1)
2102 ctx->is_nested = true;
2103 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2104 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2105 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2106 name = create_tmp_var_name (".omp_data_s");
2107 name = build_decl (gimple_location (stmt),
2108 TYPE_DECL, name, ctx->record_type);
2109 DECL_ARTIFICIAL (name) = 1;
2110 DECL_NAMELESS (name) = 1;
2111 TYPE_NAME (ctx->record_type) = name;
2112 create_omp_child_function (ctx, false);
2113 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2115 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2116 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2118 if (TYPE_FIELDS (ctx->record_type) == NULL)
2119 ctx->record_type = ctx->receiver_decl = NULL;
2122 /* Scan an OpenMP task directive. */
2124 static void
2125 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2127 omp_context *ctx;
2128 tree name, t;
2129 gimple_omp_task stmt = as_a <gimple_omp_task> (gsi_stmt (*gsi));
2131 /* Ignore task directives with empty bodies. */
2132 if (optimize > 0
2133 && empty_body_p (gimple_omp_body (stmt)))
2135 gsi_replace (gsi, gimple_build_nop (), false);
2136 return;
2139 ctx = new_omp_context (stmt, outer_ctx);
2140 taskreg_contexts.safe_push (ctx);
2141 if (taskreg_nesting_level > 1)
2142 ctx->is_nested = true;
2143 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2144 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2145 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2146 name = create_tmp_var_name (".omp_data_s");
2147 name = build_decl (gimple_location (stmt),
2148 TYPE_DECL, name, ctx->record_type);
2149 DECL_ARTIFICIAL (name) = 1;
2150 DECL_NAMELESS (name) = 1;
2151 TYPE_NAME (ctx->record_type) = name;
2152 create_omp_child_function (ctx, false);
2153 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2155 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2157 if (ctx->srecord_type)
2159 name = create_tmp_var_name (".omp_data_a");
2160 name = build_decl (gimple_location (stmt),
2161 TYPE_DECL, name, ctx->srecord_type);
2162 DECL_ARTIFICIAL (name) = 1;
2163 DECL_NAMELESS (name) = 1;
2164 TYPE_NAME (ctx->srecord_type) = name;
2165 create_omp_child_function (ctx, true);
2168 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2170 if (TYPE_FIELDS (ctx->record_type) == NULL)
2172 ctx->record_type = ctx->receiver_decl = NULL;
2173 t = build_int_cst (long_integer_type_node, 0);
2174 gimple_omp_task_set_arg_size (stmt, t);
2175 t = build_int_cst (long_integer_type_node, 1);
2176 gimple_omp_task_set_arg_align (stmt, t);
2181 /* If any decls have been made addressable during scan_omp,
2182 adjust their fields if needed, and lay out the record types
2183 of parallel/task constructs. */
2185 static void
2186 finish_taskreg_scan (omp_context *ctx)
2188 if (ctx->record_type == NULL_TREE)
2189 return;
2191 /* If any task_shared_vars were needed, verify for all
2192 OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
2193 statements whether use_pointer_for_field has changed
2194 because of that; if it did, update the field types now. */
2195 if (task_shared_vars)
2197 tree c;
2199 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2200 c; c = OMP_CLAUSE_CHAIN (c))
2201 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2203 tree decl = OMP_CLAUSE_DECL (c);
2205 /* Global variables don't need to be copied,
2206 the receiver side will use them directly. */
2207 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2208 continue;
2209 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2210 || !use_pointer_for_field (decl, ctx))
2211 continue;
2212 tree field = lookup_field (decl, ctx);
2213 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2214 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2215 continue;
2216 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2217 TREE_THIS_VOLATILE (field) = 0;
2218 DECL_USER_ALIGN (field) = 0;
2219 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2220 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2221 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2222 if (ctx->srecord_type)
2224 tree sfield = lookup_sfield (decl, ctx);
2225 TREE_TYPE (sfield) = TREE_TYPE (field);
2226 TREE_THIS_VOLATILE (sfield) = 0;
2227 DECL_USER_ALIGN (sfield) = 0;
2228 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2229 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2230 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2235 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2237 layout_type (ctx->record_type);
2238 fixup_child_record_type (ctx);
2240 else
2242 location_t loc = gimple_location (ctx->stmt);
2243 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2244 /* Move VLA fields to the end. */
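/* E.g. a record { int a; char vla[n]; int b; } is rechained as
   { int a; int b; char vla[n]; } so that all fixed-size fields keep
   constant offsets.  */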
2245 p = &TYPE_FIELDS (ctx->record_type);
2246 while (*p)
2247 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2248 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2250 *q = *p;
2251 *p = TREE_CHAIN (*p);
2252 TREE_CHAIN (*q) = NULL_TREE;
2253 q = &TREE_CHAIN (*q);
2255 else
2256 p = &DECL_CHAIN (*p);
2257 *p = vla_fields;
2258 layout_type (ctx->record_type);
2259 fixup_child_record_type (ctx);
2260 if (ctx->srecord_type)
2261 layout_type (ctx->srecord_type);
2262 tree t = fold_convert_loc (loc, long_integer_type_node,
2263 TYPE_SIZE_UNIT (ctx->record_type));
2264 gimple_omp_task_set_arg_size (ctx->stmt, t);
2265 t = build_int_cst (long_integer_type_node,
2266 TYPE_ALIGN_UNIT (ctx->record_type));
2267 gimple_omp_task_set_arg_align (ctx->stmt, t);
2272 /* Scan an OpenMP loop directive. */
2274 static void
2275 scan_omp_for (gimple_omp_for stmt, omp_context *outer_ctx)
2277 omp_context *ctx;
2278 size_t i;
2280 ctx = new_omp_context (stmt, outer_ctx);
2282 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2284 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2285 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2287 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2288 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2289 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2290 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2292 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2295 /* Scan an OpenMP sections directive. */
2297 static void
2298 scan_omp_sections (gimple_omp_sections stmt, omp_context *outer_ctx)
2300 omp_context *ctx;
2302 ctx = new_omp_context (stmt, outer_ctx);
2303 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2304 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2307 /* Scan an OpenMP single directive. */
2309 static void
2310 scan_omp_single (gimple_omp_single stmt, omp_context *outer_ctx)
2312 omp_context *ctx;
2313 tree name;
2315 ctx = new_omp_context (stmt, outer_ctx);
2316 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2317 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2318 name = create_tmp_var_name (".omp_copy_s");
2319 name = build_decl (gimple_location (stmt),
2320 TYPE_DECL, name, ctx->record_type);
2321 TYPE_NAME (ctx->record_type) = name;
2323 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2324 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2326 if (TYPE_FIELDS (ctx->record_type) == NULL)
2327 ctx->record_type = NULL;
2328 else
2329 layout_type (ctx->record_type);
2332 /* Scan an OpenMP target{, data, update} directive. */
2334 static void
2335 scan_omp_target (gimple_omp_target stmt, omp_context *outer_ctx)
2337 omp_context *ctx;
2338 tree name;
2339 int kind = gimple_omp_target_kind (stmt);
2341 ctx = new_omp_context (stmt, outer_ctx);
2342 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2343 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2344 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2345 name = create_tmp_var_name (".omp_data_t");
2346 name = build_decl (gimple_location (stmt),
2347 TYPE_DECL, name, ctx->record_type);
2348 DECL_ARTIFICIAL (name) = 1;
2349 DECL_NAMELESS (name) = 1;
2350 TYPE_NAME (ctx->record_type) = name;
2351 if (kind == GF_OMP_TARGET_KIND_REGION)
2353 create_omp_child_function (ctx, false);
2354 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2357 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2358 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2360 if (TYPE_FIELDS (ctx->record_type) == NULL)
2361 ctx->record_type = ctx->receiver_decl = NULL;
2362 else
2364 TYPE_FIELDS (ctx->record_type)
2365 = nreverse (TYPE_FIELDS (ctx->record_type));
2366 #ifdef ENABLE_CHECKING
2367 tree field;
2368 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2369 for (field = TYPE_FIELDS (ctx->record_type);
2370 field;
2371 field = DECL_CHAIN (field))
2372 gcc_assert (DECL_ALIGN (field) == align);
2373 #endif
2374 layout_type (ctx->record_type);
2375 if (kind == GF_OMP_TARGET_KIND_REGION)
2376 fixup_child_record_type (ctx);
2380 /* Scan an OpenMP teams directive. */
2382 static void
2383 scan_omp_teams (gimple_omp_teams stmt, omp_context *outer_ctx)
2385 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2386 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2387 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2390 /* Check OpenMP nesting restrictions. */
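/* E.g. this is what rejects a barrier closely nested in a worksharing
   region:

     #pragma omp for
     for (i = 0; i < n; i++)
       {
         #pragma omp barrier      <-- error, see the GIMPLE_OMP_FOR case
       }
   */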
2391 static bool
2392 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2394 if (ctx != NULL)
2396 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2397 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2399 error_at (gimple_location (stmt),
2400 "OpenMP constructs may not be nested inside simd region");
2401 return false;
2403 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2405 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2406 || (gimple_omp_for_kind (stmt)
2407 != GF_OMP_FOR_KIND_DISTRIBUTE))
2408 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2410 error_at (gimple_location (stmt),
2411 "only distribute or parallel constructs are allowed to "
2412 "be closely nested inside teams construct");
2413 return false;
2417 switch (gimple_code (stmt))
2419 case GIMPLE_OMP_FOR:
2420 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2421 return true;
2422 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2424 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2426 error_at (gimple_location (stmt),
2427 "distribute construct must be closely nested inside "
2428 "teams construct");
2429 return false;
2431 return true;
2433 /* FALLTHRU */
2434 case GIMPLE_CALL:
2435 if (is_gimple_call (stmt)
2436 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2437 == BUILT_IN_GOMP_CANCEL
2438 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2439 == BUILT_IN_GOMP_CANCELLATION_POINT))
2441 const char *bad = NULL;
2442 const char *kind = NULL;
2443 if (ctx == NULL)
2445 error_at (gimple_location (stmt), "orphaned %qs construct",
2446 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2447 == BUILT_IN_GOMP_CANCEL
2448 ? "#pragma omp cancel"
2449 : "#pragma omp cancellation point");
2450 return false;
2452 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2453 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2454 : 0)
2456 case 1:
2457 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2458 bad = "#pragma omp parallel";
2459 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2460 == BUILT_IN_GOMP_CANCEL
2461 && !integer_zerop (gimple_call_arg (stmt, 1)))
2462 ctx->cancellable = true;
2463 kind = "parallel";
2464 break;
2465 case 2:
2466 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2467 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2468 bad = "#pragma omp for";
2469 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2470 == BUILT_IN_GOMP_CANCEL
2471 && !integer_zerop (gimple_call_arg (stmt, 1)))
2473 ctx->cancellable = true;
2474 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2475 OMP_CLAUSE_NOWAIT))
2476 warning_at (gimple_location (stmt), 0,
2477 "%<#pragma omp cancel for%> inside "
2478 "%<nowait%> for construct");
2479 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2480 OMP_CLAUSE_ORDERED))
2481 warning_at (gimple_location (stmt), 0,
2482 "%<#pragma omp cancel for%> inside "
2483 "%<ordered%> for construct");
2485 kind = "for";
2486 break;
2487 case 4:
2488 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2489 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2490 bad = "#pragma omp sections";
2491 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2492 == BUILT_IN_GOMP_CANCEL
2493 && !integer_zerop (gimple_call_arg (stmt, 1)))
2495 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2497 ctx->cancellable = true;
2498 if (find_omp_clause (gimple_omp_sections_clauses
2499 (ctx->stmt),
2500 OMP_CLAUSE_NOWAIT))
2501 warning_at (gimple_location (stmt), 0,
2502 "%<#pragma omp cancel sections%> inside "
2503 "%<nowait%> sections construct");
2505 else
2507 gcc_assert (ctx->outer
2508 && gimple_code (ctx->outer->stmt)
2509 == GIMPLE_OMP_SECTIONS);
2510 ctx->outer->cancellable = true;
2511 if (find_omp_clause (gimple_omp_sections_clauses
2512 (ctx->outer->stmt),
2513 OMP_CLAUSE_NOWAIT))
2514 warning_at (gimple_location (stmt), 0,
2515 "%<#pragma omp cancel sections%> inside "
2516 "%<nowait%> sections construct");
2519 kind = "sections";
2520 break;
2521 case 8:
2522 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2523 bad = "#pragma omp task";
2524 else
2525 ctx->cancellable = true;
2526 kind = "taskgroup";
2527 break;
2528 default:
2529 error_at (gimple_location (stmt), "invalid arguments");
2530 return false;
2532 if (bad)
2534 error_at (gimple_location (stmt),
2535 "%<%s %s%> construct not closely nested inside of %qs",
2536 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2537 == BUILT_IN_GOMP_CANCEL
2538 ? "#pragma omp cancel"
2539 : "#pragma omp cancellation point", kind, bad);
2540 return false;
2543 /* FALLTHRU */
2544 case GIMPLE_OMP_SECTIONS:
2545 case GIMPLE_OMP_SINGLE:
2546 for (; ctx != NULL; ctx = ctx->outer)
2547 switch (gimple_code (ctx->stmt))
2549 case GIMPLE_OMP_FOR:
2550 case GIMPLE_OMP_SECTIONS:
2551 case GIMPLE_OMP_SINGLE:
2552 case GIMPLE_OMP_ORDERED:
2553 case GIMPLE_OMP_MASTER:
2554 case GIMPLE_OMP_TASK:
2555 case GIMPLE_OMP_CRITICAL:
2556 if (is_gimple_call (stmt))
2558 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2559 != BUILT_IN_GOMP_BARRIER)
2560 return true;
2561 error_at (gimple_location (stmt),
2562 "barrier region may not be closely nested inside "
2563 "of work-sharing, critical, ordered, master or "
2564 "explicit task region");
2565 return false;
2567 error_at (gimple_location (stmt),
2568 "work-sharing region may not be closely nested inside "
2569 "of work-sharing, critical, ordered, master or explicit "
2570 "task region");
2571 return false;
2572 case GIMPLE_OMP_PARALLEL:
2573 return true;
2574 default:
2575 break;
2577 break;
2578 case GIMPLE_OMP_MASTER:
2579 for (; ctx != NULL; ctx = ctx->outer)
2580 switch (gimple_code (ctx->stmt))
2582 case GIMPLE_OMP_FOR:
2583 case GIMPLE_OMP_SECTIONS:
2584 case GIMPLE_OMP_SINGLE:
2585 case GIMPLE_OMP_TASK:
2586 error_at (gimple_location (stmt),
2587 "master region may not be closely nested inside "
2588 "of work-sharing or explicit task region");
2589 return false;
2590 case GIMPLE_OMP_PARALLEL:
2591 return true;
2592 default:
2593 break;
2595 break;
2596 case GIMPLE_OMP_ORDERED:
2597 for (; ctx != NULL; ctx = ctx->outer)
2598 switch (gimple_code (ctx->stmt))
2600 case GIMPLE_OMP_CRITICAL:
2601 case GIMPLE_OMP_TASK:
2602 error_at (gimple_location (stmt),
2603 "ordered region may not be closely nested inside "
2604 "of critical or explicit task region");
2605 return false;
2606 case GIMPLE_OMP_FOR:
2607 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2608 OMP_CLAUSE_ORDERED) == NULL)
2610 error_at (gimple_location (stmt),
2611 "ordered region must be closely nested inside "
2612 "a loop region with an ordered clause");
2613 return false;
2615 return true;
2616 case GIMPLE_OMP_PARALLEL:
2617 error_at (gimple_location (stmt),
2618 "ordered region must be closely nested inside "
2619 "a loop region with an ordered clause");
2620 return false;
2621 default:
2622 break;
2624 break;
2625 case GIMPLE_OMP_CRITICAL:
2627 tree this_stmt_name =
2628 gimple_omp_critical_name (as_a <gimple_omp_critical> (stmt));
2629 for (; ctx != NULL; ctx = ctx->outer)
2630 if (gimple_omp_critical other_crit =
2631 dyn_cast <gimple_omp_critical> (ctx->stmt))
2632 if (this_stmt_name == gimple_omp_critical_name (other_crit))
2634 error_at (gimple_location (stmt),
2635 "critical region may not be nested inside a critical "
2636 "region with the same name");
2637 return false;
2640 break;
2641 case GIMPLE_OMP_TEAMS:
2642 if (ctx == NULL
2643 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2644 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2646 error_at (gimple_location (stmt),
2647 "teams construct not closely nested inside of target "
2648 "region");
2649 return false;
2651 break;
2652 case GIMPLE_OMP_TARGET:
2653 for (; ctx != NULL; ctx = ctx->outer)
2654 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
2655 && gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_REGION)
2657 const char *name;
2658 switch (gimple_omp_target_kind (stmt))
2660 case GF_OMP_TARGET_KIND_REGION: name = "target"; break;
2661 case GF_OMP_TARGET_KIND_DATA: name = "target data"; break;
2662 case GF_OMP_TARGET_KIND_UPDATE: name = "target update"; break;
2663 default: gcc_unreachable ();
2665 warning_at (gimple_location (stmt), 0,
2666 "%s construct inside of target region", name);
2668 break;
2669 default:
2670 break;
2672 return true;
2676 /* Helper function for scan_omp.
2678 Callback for walk_tree or operators in walk_gimple_stmt used to
2679 scan for OpenMP directives in TP. */
2681 static tree
2682 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2684 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2685 omp_context *ctx = (omp_context *) wi->info;
2686 tree t = *tp;
2688 switch (TREE_CODE (t))
2690 case VAR_DECL:
2691 case PARM_DECL:
2692 case LABEL_DECL:
2693 case RESULT_DECL:
2694 if (ctx)
2695 *tp = remap_decl (t, &ctx->cb);
2696 break;
2698 default:
2699 if (ctx && TYPE_P (t))
2700 *tp = remap_type (t, &ctx->cb);
2701 else if (!DECL_P (t))
2703 *walk_subtrees = 1;
2704 if (ctx)
2706 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2707 if (tem != TREE_TYPE (t))
2709 if (TREE_CODE (t) == INTEGER_CST)
2710 *tp = wide_int_to_tree (tem, t);
2711 else
2712 TREE_TYPE (t) = tem;
2716 break;
2719 return NULL_TREE;
2722 /* Return true if FNDECL is a setjmp or a longjmp. */
2724 static bool
2725 setjmp_or_longjmp_p (const_tree fndecl)
2727 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2728 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2729 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2730 return true;
2732 tree declname = DECL_NAME (fndecl);
2733 if (!declname)
2734 return false;
2735 const char *name = IDENTIFIER_POINTER (declname);
2736 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2740 /* Helper function for scan_omp.
2742 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2743 the current statement in GSI. */
2745 static tree
2746 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2747 struct walk_stmt_info *wi)
2749 gimple stmt = gsi_stmt (*gsi);
2750 omp_context *ctx = (omp_context *) wi->info;
2752 if (gimple_has_location (stmt))
2753 input_location = gimple_location (stmt);
2755 /* Check the OpenMP nesting restrictions. */
2756 bool remove = false;
2757 if (is_gimple_omp (stmt))
2758 remove = !check_omp_nesting_restrictions (stmt, ctx);
2759 else if (is_gimple_call (stmt))
2761 tree fndecl = gimple_call_fndecl (stmt);
2762 if (fndecl)
2764 if (setjmp_or_longjmp_p (fndecl)
2765 && ctx
2766 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2767 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2769 remove = true;
2770 error_at (gimple_location (stmt),
2771 "setjmp/longjmp inside simd construct");
2773 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2774 switch (DECL_FUNCTION_CODE (fndecl))
2776 case BUILT_IN_GOMP_BARRIER:
2777 case BUILT_IN_GOMP_CANCEL:
2778 case BUILT_IN_GOMP_CANCELLATION_POINT:
2779 case BUILT_IN_GOMP_TASKYIELD:
2780 case BUILT_IN_GOMP_TASKWAIT:
2781 case BUILT_IN_GOMP_TASKGROUP_START:
2782 case BUILT_IN_GOMP_TASKGROUP_END:
2783 remove = !check_omp_nesting_restrictions (stmt, ctx);
2784 break;
2785 default:
2786 break;
2790 if (remove)
2792 stmt = gimple_build_nop ();
2793 gsi_replace (gsi, stmt, false);
2796 *handled_ops_p = true;
2798 switch (gimple_code (stmt))
2800 case GIMPLE_OMP_PARALLEL:
2801 taskreg_nesting_level++;
2802 scan_omp_parallel (gsi, ctx);
2803 taskreg_nesting_level--;
2804 break;
2806 case GIMPLE_OMP_TASK:
2807 taskreg_nesting_level++;
2808 scan_omp_task (gsi, ctx);
2809 taskreg_nesting_level--;
2810 break;
2812 case GIMPLE_OMP_FOR:
2813 scan_omp_for (as_a <gimple_omp_for> (stmt), ctx);
2814 break;
2816 case GIMPLE_OMP_SECTIONS:
2817 scan_omp_sections (as_a <gimple_omp_sections> (stmt), ctx);
2818 break;
2820 case GIMPLE_OMP_SINGLE:
2821 scan_omp_single (as_a <gimple_omp_single> (stmt), ctx);
2822 break;
2824 case GIMPLE_OMP_SECTION:
2825 case GIMPLE_OMP_MASTER:
2826 case GIMPLE_OMP_TASKGROUP:
2827 case GIMPLE_OMP_ORDERED:
2828 case GIMPLE_OMP_CRITICAL:
2829 ctx = new_omp_context (stmt, ctx);
2830 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2831 break;
2833 case GIMPLE_OMP_TARGET:
2834 scan_omp_target (as_a <gimple_omp_target> (stmt), ctx);
2835 break;
2837 case GIMPLE_OMP_TEAMS:
2838 scan_omp_teams (as_a <gimple_omp_teams> (stmt), ctx);
2839 break;
2841 case GIMPLE_BIND:
2843 tree var;
2845 *handled_ops_p = false;
2846 if (ctx)
2847 for (var = gimple_bind_vars (as_a <gimple_bind> (stmt));
2848 var ;
2849 var = DECL_CHAIN (var))
2850 insert_decl_map (&ctx->cb, var, var);
2852 break;
2853 default:
2854 *handled_ops_p = false;
2855 break;
2858 return NULL_TREE;
2862 /* Scan all the statements starting at the current statement. CTX
2863 contains context information about the OpenMP directives and
2864 clauses found during the scan. */
2866 static void
2867 scan_omp (gimple_seq *body_p, omp_context *ctx)
2869 location_t saved_location;
2870 struct walk_stmt_info wi;
2872 memset (&wi, 0, sizeof (wi));
2873 wi.info = ctx;
2874 wi.want_locations = true;
2876 saved_location = input_location;
2877 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2878 input_location = saved_location;
2881 /* Re-gimplification and code generation routines. */
2883 /* Build a call to GOMP_barrier. */
2885 static gimple
2886 build_omp_barrier (tree lhs)
2888 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2889 : BUILT_IN_GOMP_BARRIER);
2890 gimple_call g = gimple_build_call (fndecl, 0);
2891 if (lhs)
2892 gimple_call_set_lhs (g, lhs);
2893 return g;
2896 /* If a context was created for STMT when it was scanned, return it. */
2898 static omp_context *
2899 maybe_lookup_ctx (gimple stmt)
2901 splay_tree_node n;
2902 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2903 return n ? (omp_context *) n->value : NULL;
2907 /* Find the mapping for DECL in CTX or the immediately enclosing
2908 context that has a mapping for DECL.
2910 If CTX is a nested parallel directive, we may have to use the decl
2911 mappings created in CTX's parent context. Suppose that we have the
2912 following parallel nesting (variable UIDs shown for clarity):
2914 iD.1562 = 0;
2915 #omp parallel shared(iD.1562) -> outer parallel
2916 iD.1562 = iD.1562 + 1;
2918 #omp parallel shared (iD.1562) -> inner parallel
2919 iD.1562 = iD.1562 - 1;
2921 Each parallel structure will create a distinct .omp_data_s structure
2922 for copying iD.1562 in/out of the directive:
2924 outer parallel .omp_data_s.1.i -> iD.1562
2925 inner parallel .omp_data_s.2.i -> iD.1562
2927 A shared variable mapping will produce a copy-out operation before
2928 the parallel directive and a copy-in operation after it. So, in
2929 this case we would have:
2931 iD.1562 = 0;
2932 .omp_data_o.1.i = iD.1562;
2933 #omp parallel shared(iD.1562) -> outer parallel
2934 .omp_data_i.1 = &.omp_data_o.1
2935 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2937 .omp_data_o.2.i = iD.1562; -> **
2938 #omp parallel shared(iD.1562) -> inner parallel
2939 .omp_data_i.2 = &.omp_data_o.2
2940 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2943 ** This is a problem. The symbol iD.1562 cannot be referenced
2944 inside the body of the outer parallel region. But since we are
2945 emitting this copy operation while expanding the inner parallel
2946 directive, we need to access the CTX structure of the outer
2947 parallel directive to get the correct mapping:
2949 .omp_data_o.2.i = .omp_data_i.1->i
2951 Since there may be other workshare or parallel directives enclosing
2952 the parallel directive, it may be necessary to walk up the context
2953 parent chain. This is not a problem in general because nested
2954 parallelism happens only rarely. */
2956 static tree
2957 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2959 tree t;
2960 omp_context *up;
2962 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2963 t = maybe_lookup_decl (decl, up);
2965 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2967 return t ? t : decl;
2971 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2972 in outer contexts. */
2974 static tree
2975 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2977 tree t = NULL;
2978 omp_context *up;
2980 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2981 t = maybe_lookup_decl (decl, up);
2983 return t ? t : decl;
2987 /* Construct the initialization value for reduction CLAUSE. */
2989 tree
2990 omp_reduction_init (tree clause, tree type)
2992 location_t loc = OMP_CLAUSE_LOCATION (clause);
2993 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2995 case PLUS_EXPR:
2996 case MINUS_EXPR:
2997 case BIT_IOR_EXPR:
2998 case BIT_XOR_EXPR:
2999 case TRUTH_OR_EXPR:
3000 case TRUTH_ORIF_EXPR:
3001 case TRUTH_XOR_EXPR:
3002 case NE_EXPR:
3003 return build_zero_cst (type);
3005 case MULT_EXPR:
3006 case TRUTH_AND_EXPR:
3007 case TRUTH_ANDIF_EXPR:
3008 case EQ_EXPR:
3009 return fold_convert_loc (loc, type, integer_one_node);
3011 case BIT_AND_EXPR:
3012 return fold_convert_loc (loc, type, integer_minus_one_node);
3014 case MAX_EXPR:
3015 if (SCALAR_FLOAT_TYPE_P (type))
3017 REAL_VALUE_TYPE max, min;
3018 if (HONOR_INFINITIES (TYPE_MODE (type)))
3020 real_inf (&max);
3021 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3023 else
3024 real_maxval (&min, 1, TYPE_MODE (type));
3025 return build_real (type, min);
3027 else
3029 gcc_assert (INTEGRAL_TYPE_P (type));
3030 return TYPE_MIN_VALUE (type);
3033 case MIN_EXPR:
3034 if (SCALAR_FLOAT_TYPE_P (type))
3036 REAL_VALUE_TYPE max;
3037 if (HONOR_INFINITIES (TYPE_MODE (type)))
3038 real_inf (&max);
3039 else
3040 real_maxval (&max, 0, TYPE_MODE (type));
3041 return build_real (type, max);
3043 else
3045 gcc_assert (INTEGRAL_TYPE_P (type));
3046 return TYPE_MAX_VALUE (type);
3049 default:
3050 gcc_unreachable ();
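/* In summary, the switch above yields (derived from the cases, not an
   additional contract):
     +, -, |, ^, ||, != ... 0
     *, &&, == ............ 1
     & .................... ~0 (all bits set)
     max .................. TYPE's minimum value (-inf for floats when
                            infinities are honored)
     min .................. TYPE's maximum value (+inf likewise)  */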
3054 /* Return alignment to be assumed for var in CLAUSE, which should be
3055 OMP_CLAUSE_ALIGNED. */
3057 static tree
3058 omp_clause_aligned_alignment (tree clause)
3060 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3061 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3063 /* Otherwise return implementation defined alignment. */
3064 unsigned int al = 1;
3065 enum machine_mode mode, vmode;
3066 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3067 if (vs)
3068 vs = 1 << floor_log2 (vs);
3069 static enum mode_class classes[]
3070 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3071 for (int i = 0; i < 4; i += 2)
3072 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3073 mode != VOIDmode;
3074 mode = GET_MODE_WIDER_MODE (mode))
3076 vmode = targetm.vectorize.preferred_simd_mode (mode);
3077 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3078 continue;
3079 while (vs
3080 && GET_MODE_SIZE (vmode) < vs
3081 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3082 vmode = GET_MODE_2XWIDER_MODE (vmode);
3084 tree type = lang_hooks.types.type_for_mode (mode, 1);
3085 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3086 continue;
3087 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3088 / GET_MODE_SIZE (mode));
3089 if (TYPE_MODE (type) != vmode)
3090 continue;
3091 if (TYPE_ALIGN_UNIT (type) > al)
3092 al = TYPE_ALIGN_UNIT (type);
3094 return build_int_cst (integer_type_node, al);
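/* Illustration only, as the result is entirely target-dependent: on a
   target whose widest integer/float vector modes are 32 bytes wide,
   the loop above settles on al == 32, so "aligned (p)" without an
   explicit alignment becomes __builtin_assume_aligned (p, 32) in the
   caller.  */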
3097 /* Return maximum possible vectorization factor for the target. */
3099 static int
3100 omp_max_vf (void)
3102 if (!optimize
3103 || optimize_debug
3104 || !flag_tree_loop_optimize
3105 || (!flag_tree_loop_vectorize
3106 && (global_options_set.x_flag_tree_loop_vectorize
3107 || global_options_set.x_flag_tree_vectorize)))
3108 return 1;
3110 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3111 if (vs)
3113 vs = 1 << floor_log2 (vs);
3114 return vs;
3116 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3117 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3118 return GET_MODE_NUNITS (vqimode);
3119 return 1;
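/* E.g. if the preferred QImode vector mode is a 16-byte V16QI, the
   fallback path returns a maximum vectorization factor of 16.
   (Illustrative only; the result is target-dependent.)  */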
3122 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3123 privatization. */
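/* Roughly, a privatized variable X in a simd loop is backed by an
   "omp simd array" of MAX_VF elements, and the helper hands back

     IVAR = X_array[idx];     element used inside the loop body
     LVAR = X_array[lane];    element for the current SIMD lane

   with X's DECL_VALUE_EXPR pointing at LVAR, so that the vectorizer
   can later map the array elements onto vector lanes.  This is a
   sketch of the effect, not additional API.  */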
3125 static bool
3126 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3127 tree &idx, tree &lane, tree &ivar, tree &lvar)
3129 if (max_vf == 0)
3131 max_vf = omp_max_vf ();
3132 if (max_vf > 1)
3134 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3135 OMP_CLAUSE_SAFELEN);
3136 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3137 max_vf = 1;
3138 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3139 max_vf) == -1)
3140 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3142 if (max_vf > 1)
3144 idx = create_tmp_var (unsigned_type_node, NULL);
3145 lane = create_tmp_var (unsigned_type_node, NULL);
3148 if (max_vf == 1)
3149 return false;
3151 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3152 tree avar = create_tmp_var_raw (atype, NULL);
3153 if (TREE_ADDRESSABLE (new_var))
3154 TREE_ADDRESSABLE (avar) = 1;
3155 DECL_ATTRIBUTES (avar)
3156 = tree_cons (get_identifier ("omp simd array"), NULL,
3157 DECL_ATTRIBUTES (avar));
3158 gimple_add_tmp_var (avar);
3159 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3160 NULL_TREE, NULL_TREE);
3161 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3162 NULL_TREE, NULL_TREE);
3163 if (DECL_P (new_var))
3165 SET_DECL_VALUE_EXPR (new_var, lvar);
3166 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3168 return true;
3171 /* Helper function of lower_rec_input_clauses. For a reference
3172 in a simd reduction, add an underlying variable that it will reference. */
3174 static void
3175 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3177 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3178 if (TREE_CONSTANT (z))
3180 const char *name = NULL;
3181 if (DECL_NAME (new_vard))
3182 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3184 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3185 gimple_add_tmp_var (z);
3186 TREE_ADDRESSABLE (z) = 1;
3187 z = build_fold_addr_expr_loc (loc, z);
3188 gimplify_assign (new_vard, z, ilist);
3192 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3193 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3194 private variables. Initialization statements go in ILIST, while calls
3195 to destructors go in DLIST. */
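/* A sketch of the generated sequences, not the exact IL: for
   "#pragma omp parallel firstprivate (x)" ILIST receives roughly

     x.N = .omp_data_i->x;      (copy-construct the private copy)

   while DLIST receives the matching destructor calls for types that
   need them.  */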
3197 static void
3198 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3199 omp_context *ctx, struct omp_for_data *fd)
3201 tree c, dtor, copyin_seq, x, ptr;
3202 bool copyin_by_ref = false;
3203 bool lastprivate_firstprivate = false;
3204 bool reduction_omp_orig_ref = false;
3205 int pass;
3206 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3207 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3208 int max_vf = 0;
3209 tree lane = NULL_TREE, idx = NULL_TREE;
3210 tree ivar = NULL_TREE, lvar = NULL_TREE;
3211 gimple_seq llist[2] = { NULL, NULL };
3213 copyin_seq = NULL;
3215 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3216 with data sharing clauses referencing variable sized vars. That
3217 is unnecessarily hard to support and very unlikely to result in
3218 vectorized code anyway. */
3219 if (is_simd)
3220 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3221 switch (OMP_CLAUSE_CODE (c))
3223 case OMP_CLAUSE_LINEAR:
3224 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3225 max_vf = 1;
3226 /* FALLTHRU */
3227 case OMP_CLAUSE_REDUCTION:
3228 case OMP_CLAUSE_PRIVATE:
3229 case OMP_CLAUSE_FIRSTPRIVATE:
3230 case OMP_CLAUSE_LASTPRIVATE:
3231 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3232 max_vf = 1;
3233 break;
3234 default:
3235 continue;
3238 /* Do all the fixed sized types in the first pass, and the variable sized
3239 types in the second pass. This makes sure that the scalar arguments to
3240 the variable sized types are processed before we use them in the
3241 variable sized operations. */
3242 for (pass = 0; pass < 2; ++pass)
3244 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3246 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3247 tree var, new_var;
3248 bool by_ref;
3249 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3251 switch (c_kind)
3253 case OMP_CLAUSE_PRIVATE:
3254 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3255 continue;
3256 break;
3257 case OMP_CLAUSE_SHARED:
3258 /* Ignore shared directives in teams construct. */
3259 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3260 continue;
3261 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3263 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3264 continue;
3266 case OMP_CLAUSE_FIRSTPRIVATE:
3267 case OMP_CLAUSE_COPYIN:
3268 case OMP_CLAUSE_LINEAR:
3269 break;
3270 case OMP_CLAUSE_REDUCTION:
3271 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3272 reduction_omp_orig_ref = true;
3273 break;
3274 case OMP_CLAUSE__LOOPTEMP_:
3275 /* Handle _looptemp_ clauses only on parallel. */
3276 if (fd)
3277 continue;
3278 break;
3279 case OMP_CLAUSE_LASTPRIVATE:
3280 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3282 lastprivate_firstprivate = true;
3283 if (pass != 0)
3284 continue;
3286 /* Even without a corresponding firstprivate, if the
3287 decl is a Fortran allocatable, it needs an outer var
3288 reference. */
3289 else if (pass == 0
3290 && lang_hooks.decls.omp_private_outer_ref
3291 (OMP_CLAUSE_DECL (c)))
3292 lastprivate_firstprivate = true;
3293 break;
3294 case OMP_CLAUSE_ALIGNED:
3295 if (pass == 0)
3296 continue;
3297 var = OMP_CLAUSE_DECL (c);
3298 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3299 && !is_global_var (var))
3301 new_var = maybe_lookup_decl (var, ctx);
3302 if (new_var == NULL_TREE)
3303 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3304 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3305 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3306 omp_clause_aligned_alignment (c));
3307 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3308 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3309 gimplify_and_add (x, ilist);
3311 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3312 && is_global_var (var))
3314 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3315 new_var = lookup_decl (var, ctx);
3316 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3317 t = build_fold_addr_expr_loc (clause_loc, t);
3318 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3319 t = build_call_expr_loc (clause_loc, t2, 2, t,
3320 omp_clause_aligned_alignment (c));
3321 t = fold_convert_loc (clause_loc, ptype, t);
3322 x = create_tmp_var (ptype, NULL);
3323 t = build2 (MODIFY_EXPR, ptype, x, t);
3324 gimplify_and_add (t, ilist);
3325 t = build_simple_mem_ref_loc (clause_loc, x);
3326 SET_DECL_VALUE_EXPR (new_var, t);
3327 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3329 continue;
3330 default:
3331 continue;
3334 new_var = var = OMP_CLAUSE_DECL (c);
3335 if (c_kind != OMP_CLAUSE_COPYIN)
3336 new_var = lookup_decl (var, ctx);
3338 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3340 if (pass != 0)
3341 continue;
3343 else if (is_variable_sized (var))
3345 /* For variable sized types, we need to allocate the
3346 actual storage here. Call alloca and store the
3347 result in the pointer decl that we created elsewhere. */
3348 if (pass == 0)
3349 continue;
3351 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3353 gimple_call stmt;
3354 tree tmp, atmp;
3356 ptr = DECL_VALUE_EXPR (new_var);
3357 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3358 ptr = TREE_OPERAND (ptr, 0);
3359 gcc_assert (DECL_P (ptr));
3360 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3362 /* void *tmp = __builtin_alloca (x); */
3363 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3364 stmt = gimple_build_call (atmp, 1, x);
3365 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3366 gimple_add_tmp_var (tmp);
3367 gimple_call_set_lhs (stmt, tmp);
3369 gimple_seq_add_stmt (ilist, stmt);
3371 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3372 gimplify_assign (ptr, x, ilist);
3375 else if (is_reference (var))
3377 /* For references that are being privatized for Fortran,
3378 allocate new backing storage for the new pointer
3379 variable. This allows us to avoid changing all the
3380 code that expects a pointer to something that expects
3381 a direct variable. */
3382 if (pass == 0)
3383 continue;
3385 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3386 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3388 x = build_receiver_ref (var, false, ctx);
3389 x = build_fold_addr_expr_loc (clause_loc, x);
3391 else if (TREE_CONSTANT (x))
3393 /* For a reduction in a SIMD loop, defer adding the
3394 initialization of the reference, because if we decide
3395 to use a SIMD array for it, the initialization could cause
3396 an ICE during expansion. */
3397 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3398 x = NULL_TREE;
3399 else
3401 const char *name = NULL;
3402 if (DECL_NAME (var))
3403 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3405 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3406 name);
3407 gimple_add_tmp_var (x);
3408 TREE_ADDRESSABLE (x) = 1;
3409 x = build_fold_addr_expr_loc (clause_loc, x);
3412 else
3414 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3415 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3418 if (x)
3420 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3421 gimplify_assign (new_var, x, ilist);
3424 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3426 else if (c_kind == OMP_CLAUSE_REDUCTION
3427 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3429 if (pass == 0)
3430 continue;
3432 else if (pass != 0)
3433 continue;
3435 switch (OMP_CLAUSE_CODE (c))
3437 case OMP_CLAUSE_SHARED:
3438 /* Ignore shared directives in teams construct. */
3439 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3440 continue;
3441 /* Shared global vars are just accessed directly. */
3442 if (is_global_var (new_var))
3443 break;
3444 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3445 needs to be delayed until after fixup_child_record_type so
3446 that we get the correct type during the dereference. */
3447 by_ref = use_pointer_for_field (var, ctx);
3448 x = build_receiver_ref (var, by_ref, ctx);
3449 SET_DECL_VALUE_EXPR (new_var, x);
3450 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3452 /* ??? If VAR is not passed by reference, and the variable
3453 hasn't been initialized yet, then we'll get a warning for
3454 the store into the omp_data_s structure. Ideally, we'd be
3455 able to notice this and not store anything at all, but
3456 we're generating code too early. Suppress the warning. */
3457 if (!by_ref)
3458 TREE_NO_WARNING (var) = 1;
3459 break;
3461 case OMP_CLAUSE_LASTPRIVATE:
3462 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3463 break;
3464 /* FALLTHRU */
3466 case OMP_CLAUSE_PRIVATE:
3467 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3468 x = build_outer_var_ref (var, ctx);
3469 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3471 if (is_task_ctx (ctx))
3472 x = build_receiver_ref (var, false, ctx);
3473 else
3474 x = build_outer_var_ref (var, ctx);
3476 else
3477 x = NULL;
3478 do_private:
3479 tree nx;
3480 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3481 if (is_simd)
3483 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3484 if ((TREE_ADDRESSABLE (new_var) || nx || y
3485 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3486 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3487 idx, lane, ivar, lvar))
3489 if (nx)
3490 x = lang_hooks.decls.omp_clause_default_ctor
3491 (c, unshare_expr (ivar), x);
3492 if (nx && x)
3493 gimplify_and_add (x, &llist[0]);
3494 if (y)
3496 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3497 if (y)
3499 gimple_seq tseq = NULL;
3501 dtor = y;
3502 gimplify_stmt (&dtor, &tseq);
3503 gimple_seq_add_seq (&llist[1], tseq);
3506 break;
3509 if (nx)
3510 gimplify_and_add (nx, ilist);
3511 /* FALLTHRU */
3513 do_dtor:
3514 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3515 if (x)
3517 gimple_seq tseq = NULL;
3519 dtor = x;
3520 gimplify_stmt (&dtor, &tseq);
3521 gimple_seq_add_seq (dlist, tseq);
3523 break;
3525 case OMP_CLAUSE_LINEAR:
3526 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3527 goto do_firstprivate;
3528 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3529 x = NULL;
3530 else
3531 x = build_outer_var_ref (var, ctx);
3532 goto do_private;
3534 case OMP_CLAUSE_FIRSTPRIVATE:
3535 if (is_task_ctx (ctx))
3537 if (is_reference (var) || is_variable_sized (var))
3538 goto do_dtor;
3539 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3540 ctx))
3541 || use_pointer_for_field (var, NULL))
3543 x = build_receiver_ref (var, false, ctx);
3544 SET_DECL_VALUE_EXPR (new_var, x);
3545 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3546 goto do_dtor;
3549 do_firstprivate:
3550 x = build_outer_var_ref (var, ctx);
3551 if (is_simd)
3553 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3554 && gimple_omp_for_combined_into_p (ctx->stmt))
3556 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3557 tree stept = TREE_TYPE (t);
3558 tree ct = find_omp_clause (clauses,
3559 OMP_CLAUSE__LOOPTEMP_);
3560 gcc_assert (ct);
3561 tree l = OMP_CLAUSE_DECL (ct);
3562 tree n1 = fd->loop.n1;
3563 tree step = fd->loop.step;
3564 tree itype = TREE_TYPE (l);
3565 if (POINTER_TYPE_P (itype))
3566 itype = signed_type_for (itype);
3567 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3568 if (TYPE_UNSIGNED (itype)
3569 && fd->loop.cond_code == GT_EXPR)
3570 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3571 fold_build1 (NEGATE_EXPR, itype, l),
3572 fold_build1 (NEGATE_EXPR,
3573 itype, step));
3574 else
3575 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3576 t = fold_build2 (MULT_EXPR, stept,
3577 fold_convert (stept, l), t);
3579 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3581 x = lang_hooks.decls.omp_clause_linear_ctor
3582 (c, new_var, x, t);
3583 gimplify_and_add (x, ilist);
3584 goto do_dtor;
3587 if (POINTER_TYPE_P (TREE_TYPE (x)))
3588 x = fold_build2 (POINTER_PLUS_EXPR,
3589 TREE_TYPE (x), x, t);
3590 else
3591 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3594 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3595 || TREE_ADDRESSABLE (new_var))
3596 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3597 idx, lane, ivar, lvar))
3599 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3601 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3602 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3603 gimplify_and_add (x, ilist);
3604 gimple_stmt_iterator gsi
3605 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3606 gimple_assign g
3607 = gimple_build_assign (unshare_expr (lvar), iv);
3608 gsi_insert_before_without_update (&gsi, g,
3609 GSI_SAME_STMT);
3610 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3611 enum tree_code code = PLUS_EXPR;
3612 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3613 code = POINTER_PLUS_EXPR;
3614 g = gimple_build_assign_with_ops (code, iv, iv, t);
3615 gsi_insert_before_without_update (&gsi, g,
3616 GSI_SAME_STMT);
3617 break;
3619 x = lang_hooks.decls.omp_clause_copy_ctor
3620 (c, unshare_expr (ivar), x);
3621 gimplify_and_add (x, &llist[0]);
3622 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3623 if (x)
3625 gimple_seq tseq = NULL;
3627 dtor = x;
3628 gimplify_stmt (&dtor, &tseq);
3629 gimple_seq_add_seq (&llist[1], tseq);
3631 break;
3634 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3635 gimplify_and_add (x, ilist);
3636 goto do_dtor;
3638 case OMP_CLAUSE__LOOPTEMP_:
3639 gcc_assert (is_parallel_ctx (ctx));
3640 x = build_outer_var_ref (var, ctx);
3641 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3642 gimplify_and_add (x, ilist);
3643 break;
3645 case OMP_CLAUSE_COPYIN:
3646 by_ref = use_pointer_for_field (var, NULL);
3647 x = build_receiver_ref (var, by_ref, ctx);
3648 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3649 append_to_statement_list (x, &copyin_seq);
3650 copyin_by_ref |= by_ref;
3651 break;
3653 case OMP_CLAUSE_REDUCTION:
3654 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3656 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3657 gimple tseq;
3658 x = build_outer_var_ref (var, ctx);
3660 if (is_reference (var)
3661 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3662 TREE_TYPE (x)))
3663 x = build_fold_addr_expr_loc (clause_loc, x);
3664 SET_DECL_VALUE_EXPR (placeholder, x);
3665 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3666 tree new_vard = new_var;
3667 if (is_reference (var))
3669 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3670 new_vard = TREE_OPERAND (new_var, 0);
3671 gcc_assert (DECL_P (new_vard));
3673 if (is_simd
3674 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3675 idx, lane, ivar, lvar))
3677 if (new_vard == new_var)
3679 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3680 SET_DECL_VALUE_EXPR (new_var, ivar);
3682 else
3684 SET_DECL_VALUE_EXPR (new_vard,
3685 build_fold_addr_expr (ivar));
3686 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3688 x = lang_hooks.decls.omp_clause_default_ctor
3689 (c, unshare_expr (ivar),
3690 build_outer_var_ref (var, ctx));
3691 if (x)
3692 gimplify_and_add (x, &llist[0]);
3693 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3695 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3696 lower_omp (&tseq, ctx);
3697 gimple_seq_add_seq (&llist[0], tseq);
3699 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3700 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3701 lower_omp (&tseq, ctx);
3702 gimple_seq_add_seq (&llist[1], tseq);
3703 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3704 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3705 if (new_vard == new_var)
3706 SET_DECL_VALUE_EXPR (new_var, lvar);
3707 else
3708 SET_DECL_VALUE_EXPR (new_vard,
3709 build_fold_addr_expr (lvar));
3710 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3711 if (x)
3713 tseq = NULL;
3714 dtor = x;
3715 gimplify_stmt (&dtor, &tseq);
3716 gimple_seq_add_seq (&llist[1], tseq);
3718 break;
3720 /* If this is a reference to a constant-size reduction var
3721 with a placeholder, we haven't emitted the initializer
3722 for it because that is undesirable if SIMD arrays are used.
3723 But if they aren't used, we need to emit the deferred
3724 initialization now. */
3725 else if (is_reference (var) && is_simd)
3726 handle_simd_reference (clause_loc, new_vard, ilist);
3727 x = lang_hooks.decls.omp_clause_default_ctor
3728 (c, unshare_expr (new_var),
3729 build_outer_var_ref (var, ctx));
3730 if (x)
3731 gimplify_and_add (x, ilist);
3732 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3734 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3735 lower_omp (&tseq, ctx);
3736 gimple_seq_add_seq (ilist, tseq);
3738 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3739 if (is_simd)
3741 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3742 lower_omp (&tseq, ctx);
3743 gimple_seq_add_seq (dlist, tseq);
3744 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3746 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3747 goto do_dtor;
3749 else
3751 x = omp_reduction_init (c, TREE_TYPE (new_var));
3752 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3753 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3755 /* reduction(-:var) sums up the partial results, so it
3756 acts identically to reduction(+:var). */
3757 if (code == MINUS_EXPR)
3758 code = PLUS_EXPR;
3760 tree new_vard = new_var;
3761 if (is_simd && is_reference (var))
3763 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3764 new_vard = TREE_OPERAND (new_var, 0);
3765 gcc_assert (DECL_P (new_vard));
3767 if (is_simd
3768 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3769 idx, lane, ivar, lvar))
3771 tree ref = build_outer_var_ref (var, ctx);
3773 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3775 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3776 ref = build_outer_var_ref (var, ctx);
3777 gimplify_assign (ref, x, &llist[1]);
3779 if (new_vard != new_var)
3781 SET_DECL_VALUE_EXPR (new_vard,
3782 build_fold_addr_expr (lvar));
3783 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3786 else
3788 if (is_reference (var) && is_simd)
3789 handle_simd_reference (clause_loc, new_vard, ilist);
3790 gimplify_assign (new_var, x, ilist);
3791 if (is_simd)
3793 tree ref = build_outer_var_ref (var, ctx);
3795 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3796 ref = build_outer_var_ref (var, ctx);
3797 gimplify_assign (ref, x, dlist);
3801 break;
3803 default:
3804 gcc_unreachable ();
3809 if (lane)
3811 tree uid = create_tmp_var (ptr_type_node, "simduid");
3812 /* Don't want uninit warnings on simduid; it is always uninitialized,
3813 since we use it only for its DECL_UID, not for its value. */
3814 TREE_NO_WARNING (uid) = 1;
3815 gimple g
3816 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3817 gimple_call_set_lhs (g, lane);
3818 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3819 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3820 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3821 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3822 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3823 gimple_omp_for_set_clauses (ctx->stmt, c);
3824 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3825 build_int_cst (unsigned_type_node, 0),
3826 NULL_TREE);
3827 gimple_seq_add_stmt (ilist, g);
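/* For the per-lane constructor sequence llist[0] (added to ILIST) and
   destructor sequence llist[1] (added to DLIST), the loop below emits
   GIMPLE of roughly this shape:

     vf = GOMP_SIMD_VF (simduid);
     idx = 0;
     goto header;
   body:
     <llist[i]>
     idx = idx + 1;
   header:
     if (idx < vf) goto body; else goto end;
   end:
   */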
3828 for (int i = 0; i < 2; i++)
3829 if (llist[i])
3831 tree vf = create_tmp_var (unsigned_type_node, NULL);
3832 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3833 gimple_call_set_lhs (g, vf);
3834 gimple_seq *seq = i == 0 ? ilist : dlist;
3835 gimple_seq_add_stmt (seq, g);
3836 tree t = build_int_cst (unsigned_type_node, 0);
3837 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3838 gimple_seq_add_stmt (seq, g);
3839 tree body = create_artificial_label (UNKNOWN_LOCATION);
3840 tree header = create_artificial_label (UNKNOWN_LOCATION);
3841 tree end = create_artificial_label (UNKNOWN_LOCATION);
3842 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3843 gimple_seq_add_stmt (seq, gimple_build_label (body));
3844 gimple_seq_add_seq (seq, llist[i]);
3845 t = build_int_cst (unsigned_type_node, 1);
3846 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3847 gimple_seq_add_stmt (seq, g);
3848 gimple_seq_add_stmt (seq, gimple_build_label (header));
3849 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3850 gimple_seq_add_stmt (seq, g);
3851 gimple_seq_add_stmt (seq, gimple_build_label (end));
3855 /* The copyin sequence is not to be executed by the main thread, since
3856 that would result in self-copies. For scalars the self-copy may not
3857 be visible, but it certainly is to a C++ operator=. */
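/* I.e. the guard emitted below is roughly:

     if (omp_get_thread_num () != 0)
       <copyin_seq>
   */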
3858 if (copyin_seq)
3860 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3861 0);
3862 x = build2 (NE_EXPR, boolean_type_node, x,
3863 build_int_cst (TREE_TYPE (x), 0));
3864 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3865 gimplify_and_add (x, ilist);
3868 /* If any copyin variable is passed by reference, we must ensure the
3869 master thread doesn't modify it before it is copied over in all
3870 threads. Similarly for variables in both firstprivate and
3871 lastprivate clauses we need to ensure the lastprivate copying
3872 happens after firstprivate copying in all threads. And similarly
3873 for UDRs if initializer expression refers to omp_orig. */
3874 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3876 /* Don't add any barrier for #pragma omp simd or
3877 #pragma omp distribute. */
3878 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3879 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3880 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3883 /* If max_vf is non-zero, then we can use only a vectorization factor
3884 up to the max_vf we chose. So stick it into the safelen clause. */
3885 if (max_vf)
3887 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3888 OMP_CLAUSE_SAFELEN);
3889 if (c == NULL_TREE
3890 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
3891 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3892 max_vf) == 1))
3894 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3895 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3896 max_vf);
3897 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3898 gimple_omp_for_set_clauses (ctx->stmt, c);
3904 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3905 both parallel and workshare constructs. PREDICATE may be NULL if it's
3906 always true. */
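/* A sketch (pseudo-C) of what this emits for each lastprivate VAR:
     if (PREDICATE)        -- omitted when PREDICATE is NULL
       orig_var = new_var; -- via the langhook assignment op
   For simd, the source may first be redirected to the "omp simd array"
   element selected by GOMP_SIMD_LAST_LANE.  */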
3908 static void
3909 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3910 omp_context *ctx)
3912 tree x, c, label = NULL, orig_clauses = clauses;
3913 bool par_clauses = false;
3914 tree simduid = NULL, lastlane = NULL;
3916 /* Early exit if there are no lastprivate or linear clauses. */
3917 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3918 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3919 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3920 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3921 break;
3922 if (clauses == NULL)
3924 /* If this was a workshare clause, see if it had been combined
3925 with its parallel. In that case, look for the clauses on the
3926 parallel statement itself. */
3927 if (is_parallel_ctx (ctx))
3928 return;
3930 ctx = ctx->outer;
3931 if (ctx == NULL || !is_parallel_ctx (ctx))
3932 return;
3934 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3935 OMP_CLAUSE_LASTPRIVATE);
3936 if (clauses == NULL)
3937 return;
3938 par_clauses = true;
3941 if (predicate)
3943 gimple_cond stmt;
3944 tree label_true, arm1, arm2;
3946 label = create_artificial_label (UNKNOWN_LOCATION);
3947 label_true = create_artificial_label (UNKNOWN_LOCATION);
3948 arm1 = TREE_OPERAND (predicate, 0);
3949 arm2 = TREE_OPERAND (predicate, 1);
3950 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3951 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3952 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3953 label_true, label);
3954 gimple_seq_add_stmt (stmt_list, stmt);
3955 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3958 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3959 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3961 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3962 if (simduid)
3963 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3966 for (c = clauses; c ;)
3968 tree var, new_var;
3969 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3971 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3972 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3973 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3975 var = OMP_CLAUSE_DECL (c);
3976 new_var = lookup_decl (var, ctx);
3978 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3980 tree val = DECL_VALUE_EXPR (new_var);
3981 if (TREE_CODE (val) == ARRAY_REF
3982 && VAR_P (TREE_OPERAND (val, 0))
3983 && lookup_attribute ("omp simd array",
3984 DECL_ATTRIBUTES (TREE_OPERAND (val,
3985 0))))
3987 if (lastlane == NULL)
3989 lastlane = create_tmp_var (unsigned_type_node, NULL);
3990 gimple_call g
3991 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3992 2, simduid,
3993 TREE_OPERAND (val, 1));
3994 gimple_call_set_lhs (g, lastlane);
3995 gimple_seq_add_stmt (stmt_list, g);
3997 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3998 TREE_OPERAND (val, 0), lastlane,
3999 NULL_TREE, NULL_TREE);
4003 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4004 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
4006 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
4007 gimple_seq_add_seq (stmt_list,
4008 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
4009 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
4011 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4012 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
4014 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
4015 gimple_seq_add_seq (stmt_list,
4016 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
4017 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
4020 x = build_outer_var_ref (var, ctx);
4021 if (is_reference (var))
4022 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4023 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
4024 gimplify_and_add (x, stmt_list);
4026 c = OMP_CLAUSE_CHAIN (c);
4027 if (c == NULL && !par_clauses)
4029 /* If this was a workshare clause, see if it had been combined
4030 with its parallel. In that case, continue looking for the
4031 clauses also on the parallel statement itself. */
4032 if (is_parallel_ctx (ctx))
4033 break;
4035 ctx = ctx->outer;
4036 if (ctx == NULL || !is_parallel_ctx (ctx))
4037 break;
4039 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4040 OMP_CLAUSE_LASTPRIVATE);
4041 par_clauses = true;
4045 if (label)
4046 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
4050 /* Generate code to implement the REDUCTION clauses. */
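/* In outline, the code emitted below is (pseudo-C):
     exactly one scalar reduction clause:
       #pragma omp atomic
       orig = orig OP new;
     multiple clauses, array reductions or UDRs:
       GOMP_atomic_start ();
       orig1 = orig1 OP1 new1; ...
       GOMP_atomic_end ();  */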
4052 static void
4053 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
4055 gimple_seq sub_seq = NULL;
4056 gimple stmt;
4057 tree x, c;
4058 int count = 0;
4060 /* SIMD reductions are handled in lower_rec_input_clauses. */
4061 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4062 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4063 return;
4065 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4066 update in that case; otherwise use a lock. */
4067 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
4068 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
4070 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4072 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4073 count = -1;
4074 break;
4076 count++;
4079 if (count == 0)
4080 return;
4082 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4084 tree var, ref, new_var;
4085 enum tree_code code;
4086 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4088 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
4089 continue;
4091 var = OMP_CLAUSE_DECL (c);
4092 new_var = lookup_decl (var, ctx);
4093 if (is_reference (var))
4094 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4095 ref = build_outer_var_ref (var, ctx);
4096 code = OMP_CLAUSE_REDUCTION_CODE (c);
4098 /* reduction(-:var) sums up the partial results, so it acts
4099 identically to reduction(+:var). */
4100 if (code == MINUS_EXPR)
4101 code = PLUS_EXPR;
4103 if (count == 1)
4105 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
4107 addr = save_expr (addr);
4108 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
4109 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
4110 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
4111 gimplify_and_add (x, stmt_seqp);
4112 return;
4115 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4117 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4119 if (is_reference (var)
4120 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4121 TREE_TYPE (ref)))
4122 ref = build_fold_addr_expr_loc (clause_loc, ref);
4123 SET_DECL_VALUE_EXPR (placeholder, ref);
4124 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4125 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
4126 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
4127 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4128 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
4130 else
4132 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4133 ref = build_outer_var_ref (var, ctx);
4134 gimplify_assign (ref, x, &sub_seq);
4138 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4139 0);
4140 gimple_seq_add_stmt (stmt_seqp, stmt);
4142 gimple_seq_add_seq (stmt_seqp, sub_seq);
4144 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4145 0);
4146 gimple_seq_add_stmt (stmt_seqp, stmt);
4150 /* Generate code to implement the COPYPRIVATE clauses. */
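/* A sketch (pseudo-C; field names illustrative) of the emitted code:
     SLIST (the copying thread):  .omp_copy_o.var = var;  -- or &var
     RLIST (all other threads):   var = .omp_copy_i.var;  */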
4152 static void
4153 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4154 omp_context *ctx)
4156 tree c;
4158 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4160 tree var, new_var, ref, x;
4161 bool by_ref;
4162 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4164 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4165 continue;
4167 var = OMP_CLAUSE_DECL (c);
4168 by_ref = use_pointer_for_field (var, NULL);
4170 ref = build_sender_ref (var, ctx);
4171 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4172 if (by_ref)
4174 x = build_fold_addr_expr_loc (clause_loc, new_var);
4175 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4177 gimplify_assign (ref, x, slist);
4179 ref = build_receiver_ref (var, false, ctx);
4180 if (by_ref)
4182 ref = fold_convert_loc (clause_loc,
4183 build_pointer_type (TREE_TYPE (new_var)),
4184 ref);
4185 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4187 if (is_reference (var))
4189 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4190 ref = build_simple_mem_ref_loc (clause_loc, ref);
4191 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4193 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4194 gimplify_and_add (x, rlist);
4199 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4200 and REDUCTION from the sender (aka parent) side. */
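/* A sketch (pseudo-C; field names illustrative) of the per-clause code:
     ILIST (before the call):  .omp_data_o.var = var;  -- or = &var
     OLIST (after the call):   var = .omp_data_o.var;  -- copy-back  */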
4202 static void
4203 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4204 omp_context *ctx)
4206 tree c;
4208 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4210 tree val, ref, x, var;
4211 bool by_ref, do_in = false, do_out = false;
4212 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4214 switch (OMP_CLAUSE_CODE (c))
4216 case OMP_CLAUSE_PRIVATE:
4217 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4218 break;
4219 continue;
4220 case OMP_CLAUSE_FIRSTPRIVATE:
4221 case OMP_CLAUSE_COPYIN:
4222 case OMP_CLAUSE_LASTPRIVATE:
4223 case OMP_CLAUSE_REDUCTION:
4224 case OMP_CLAUSE__LOOPTEMP_:
4225 break;
4226 default:
4227 continue;
4230 val = OMP_CLAUSE_DECL (c);
4231 var = lookup_decl_in_outer_ctx (val, ctx);
4233 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4234 && is_global_var (var))
4235 continue;
4236 if (is_variable_sized (val))
4237 continue;
4238 by_ref = use_pointer_for_field (val, NULL);
4240 switch (OMP_CLAUSE_CODE (c))
4242 case OMP_CLAUSE_PRIVATE:
4243 case OMP_CLAUSE_FIRSTPRIVATE:
4244 case OMP_CLAUSE_COPYIN:
4245 case OMP_CLAUSE__LOOPTEMP_:
4246 do_in = true;
4247 break;
4249 case OMP_CLAUSE_LASTPRIVATE:
4250 if (by_ref || is_reference (val))
4252 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4253 continue;
4254 do_in = true;
4256 else
4258 do_out = true;
4259 if (lang_hooks.decls.omp_private_outer_ref (val))
4260 do_in = true;
4262 break;
4264 case OMP_CLAUSE_REDUCTION:
4265 do_in = true;
4266 do_out = !(by_ref || is_reference (val));
4267 break;
4269 default:
4270 gcc_unreachable ();
4273 if (do_in)
4275 ref = build_sender_ref (val, ctx);
4276 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4277 gimplify_assign (ref, x, ilist);
4278 if (is_task_ctx (ctx))
4279 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4282 if (do_out)
4284 ref = build_sender_ref (val, ctx);
4285 gimplify_assign (var, ref, olist);
4290 /* Generate code to implement SHARED from the sender (aka parent)
4291 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4292 list things that got automatically shared. */
4294 static void
4295 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4297 tree var, ovar, nvar, f, x, record_type;
4299 if (ctx->record_type == NULL)
4300 return;
4302 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4303 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4305 ovar = DECL_ABSTRACT_ORIGIN (f);
4306 nvar = maybe_lookup_decl (ovar, ctx);
4307 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4308 continue;
4310 /* If CTX is a nested parallel directive, find the immediately
4311 enclosing parallel or workshare construct that contains a
4312 mapping for OVAR. */
4313 var = lookup_decl_in_outer_ctx (ovar, ctx);
4315 if (use_pointer_for_field (ovar, ctx))
4317 x = build_sender_ref (ovar, ctx);
4318 var = build_fold_addr_expr (var);
4319 gimplify_assign (x, var, ilist);
4321 else
4323 x = build_sender_ref (ovar, ctx);
4324 gimplify_assign (x, var, ilist);
4326 if (!TREE_READONLY (var)
4327 /* We don't need to receive a new reference to a result
4328 or parm decl. In fact we may not store to it as we will
4329 invalidate any pending RSO and generate wrong gimple
4330 during inlining. */
4331 && !((TREE_CODE (var) == RESULT_DECL
4332 || TREE_CODE (var) == PARM_DECL)
4333 && DECL_BY_REFERENCE (var)))
4335 x = build_sender_ref (ovar, ctx);
4336 gimplify_assign (var, x, olist);
4343 /* A convenience function to build an empty GIMPLE_COND with just the
4344 condition. */
4346 static gimple_cond
4347 gimple_build_cond_empty (tree cond)
4349 enum tree_code pred_code;
4350 tree lhs, rhs;
4352 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4353 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4357 /* Build the function calls to GOMP_parallel etc to actually
4358 generate the parallel operation. REGION is the parallel region
4359 being expanded. BB is the block where the code should be inserted.
4360 WS_ARGS will be set if this is a call to a combined
4361 parallel+workshare construct; it contains the list of additional
4362 arguments needed by the workshare construct. */
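/* The call emitted at the end of BB has the shape (sketch):
     GOMP_parallel{,_loop_FOO,_sections} (child_fn, &.omp_data_o,
                                          num_threads, [ws args,] flags);  */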
4364 static void
4365 expand_parallel_call (struct omp_region *region, basic_block bb,
4366 gimple_omp_parallel entry_stmt,
4367 vec<tree, va_gc> *ws_args)
4369 tree t, t1, t2, val, cond, c, clauses, flags;
4370 gimple_stmt_iterator gsi;
4371 gimple stmt;
4372 enum built_in_function start_ix;
4373 int start_ix2;
4374 location_t clause_loc;
4375 vec<tree, va_gc> *args;
4377 clauses = gimple_omp_parallel_clauses (entry_stmt);
4379 /* Determine what flavor of GOMP_parallel we will be
4380 emitting. */
4381 start_ix = BUILT_IN_GOMP_PARALLEL;
4382 if (is_combined_parallel (region))
4384 switch (region->inner->type)
4386 case GIMPLE_OMP_FOR:
4387 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4388 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4389 + (region->inner->sched_kind
4390 == OMP_CLAUSE_SCHEDULE_RUNTIME
4391 ? 3 : region->inner->sched_kind));
4392 start_ix = (enum built_in_function)start_ix2;
4393 break;
4394 case GIMPLE_OMP_SECTIONS:
4395 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4396 break;
4397 default:
4398 gcc_unreachable ();
4402 /* By default, the value of NUM_THREADS is zero (selected at run time)
4403 and there is no conditional. */
4404 cond = NULL_TREE;
4405 val = build_int_cst (unsigned_type_node, 0);
4406 flags = build_int_cst (unsigned_type_node, 0);
4408 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4409 if (c)
4410 cond = OMP_CLAUSE_IF_EXPR (c);
4412 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4413 if (c)
4415 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4416 clause_loc = OMP_CLAUSE_LOCATION (c);
4418 else
4419 clause_loc = gimple_location (entry_stmt);
4421 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4422 if (c)
4423 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4425 /* Ensure 'val' is of the correct type. */
4426 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4428 /* If we found the clause 'if (cond)', build either
4429 (cond != 0) or (cond ? val : 1u). */
4430 if (cond)
4432 cond = gimple_boolify (cond);
4434 if (integer_zerop (val))
4435 val = fold_build2_loc (clause_loc,
4436 EQ_EXPR, unsigned_type_node, cond,
4437 build_int_cst (TREE_TYPE (cond), 0));
4438 else
4440 basic_block cond_bb, then_bb, else_bb;
4441 edge e, e_then, e_else;
4442 tree tmp_then, tmp_else, tmp_join, tmp_var;
4444 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4445 if (gimple_in_ssa_p (cfun))
4447 tmp_then = make_ssa_name (tmp_var, NULL);
4448 tmp_else = make_ssa_name (tmp_var, NULL);
4449 tmp_join = make_ssa_name (tmp_var, NULL);
4451 else
4453 tmp_then = tmp_var;
4454 tmp_else = tmp_var;
4455 tmp_join = tmp_var;
4458 e = split_block (bb, NULL);
4459 cond_bb = e->src;
4460 bb = e->dest;
4461 remove_edge (e);
4463 then_bb = create_empty_bb (cond_bb);
4464 else_bb = create_empty_bb (then_bb);
4465 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4466 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4468 stmt = gimple_build_cond_empty (cond);
4469 gsi = gsi_start_bb (cond_bb);
4470 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4472 gsi = gsi_start_bb (then_bb);
4473 stmt = gimple_build_assign (tmp_then, val);
4474 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4476 gsi = gsi_start_bb (else_bb);
4477 stmt = gimple_build_assign
4478 (tmp_else, build_int_cst (unsigned_type_node, 1));
4479 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4481 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4482 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4483 add_bb_to_loop (then_bb, cond_bb->loop_father);
4484 add_bb_to_loop (else_bb, cond_bb->loop_father);
4485 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4486 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4488 if (gimple_in_ssa_p (cfun))
4490 gimple_phi phi = create_phi_node (tmp_join, bb);
4491 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4492 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4495 val = tmp_join;
4498 gsi = gsi_start_bb (bb);
4499 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4500 false, GSI_CONTINUE_LINKING);
4503 gsi = gsi_last_bb (bb);
4504 t = gimple_omp_parallel_data_arg (entry_stmt);
4505 if (t == NULL)
4506 t1 = null_pointer_node;
4507 else
4508 t1 = build_fold_addr_expr (t);
4509 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4511 vec_alloc (args, 4 + vec_safe_length (ws_args));
4512 args->quick_push (t2);
4513 args->quick_push (t1);
4514 args->quick_push (val);
4515 if (ws_args)
4516 args->splice (*ws_args);
4517 args->quick_push (flags);
4519 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4520 builtin_decl_explicit (start_ix), args);
4522 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4523 false, GSI_CONTINUE_LINKING);
4526 /* Insert a function call whose name is FUNC_NAME with the information from
4527 ENTRY_STMT into the basic_block BB. */
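/* I.e. the emitted call is (sketch):
     FUNC_NAME (child_fn, &.omp_data_o, count, grain);  */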
4529 static void
4530 expand_cilk_for_call (basic_block bb, gimple_omp_parallel entry_stmt,
4531 vec <tree, va_gc> *ws_args)
4533 tree t, t1, t2;
4534 gimple_stmt_iterator gsi;
4535 vec <tree, va_gc> *args;
4537 gcc_assert (vec_safe_length (ws_args) == 2);
4538 tree func_name = (*ws_args)[0];
4539 tree grain = (*ws_args)[1];
4541 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
4542 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
4543 gcc_assert (count != NULL_TREE);
4544 count = OMP_CLAUSE_OPERAND (count, 0);
4546 gsi = gsi_last_bb (bb);
4547 t = gimple_omp_parallel_data_arg (entry_stmt);
4548 if (t == NULL)
4549 t1 = null_pointer_node;
4550 else
4551 t1 = build_fold_addr_expr (t);
4552 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4554 vec_alloc (args, 4);
4555 args->quick_push (t2);
4556 args->quick_push (t1);
4557 args->quick_push (count);
4558 args->quick_push (grain);
4559 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
4561 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
4562 GSI_CONTINUE_LINKING);
4565 /* Build the function call to GOMP_task to actually generate the
4566 task operation. BB is the block where the code should be inserted. */
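/* The emitted call has the shape (sketch):
     GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                if_cond, flags, depend);  */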
4568 static void
4569 expand_task_call (basic_block bb, gimple_omp_task entry_stmt)
4571 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4572 gimple_stmt_iterator gsi;
4573 location_t loc = gimple_location (entry_stmt);
4575 clauses = gimple_omp_task_clauses (entry_stmt);
4577 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4578 if (c)
4579 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4580 else
4581 cond = boolean_true_node;
4583 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4584 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4585 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4586 flags = build_int_cst (unsigned_type_node,
4587 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4589 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4590 if (c)
4592 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4593 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4594 build_int_cst (unsigned_type_node, 2),
4595 build_int_cst (unsigned_type_node, 0));
4596 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4598 if (depend)
4599 depend = OMP_CLAUSE_DECL (depend);
4600 else
4601 depend = build_int_cst (ptr_type_node, 0);
4603 gsi = gsi_last_bb (bb);
4604 t = gimple_omp_task_data_arg (entry_stmt);
4605 if (t == NULL)
4606 t2 = null_pointer_node;
4607 else
4608 t2 = build_fold_addr_expr_loc (loc, t);
4609 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4610 t = gimple_omp_task_copy_fn (entry_stmt);
4611 if (t == NULL)
4612 t3 = null_pointer_node;
4613 else
4614 t3 = build_fold_addr_expr_loc (loc, t);
4616 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4617 8, t1, t2, t3,
4618 gimple_omp_task_arg_size (entry_stmt),
4619 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4620 depend);
4622 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4623 false, GSI_CONTINUE_LINKING);
4627 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4628 catch handler and return it. This prevents programs from violating the
4629 structured block semantics with throws. */
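/* I.e. BODY becomes (sketch):
     try { BODY }
     catch { MUST_NOT_THROW (terminate for C++, else __builtin_trap) }  */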
4631 static gimple_seq
4632 maybe_catch_exception (gimple_seq body)
4634 gimple g;
4635 tree decl;
4637 if (!flag_exceptions)
4638 return body;
4640 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4641 decl = lang_hooks.eh_protect_cleanup_actions ();
4642 else
4643 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4645 g = gimple_build_eh_must_not_throw (decl);
4646 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4647 GIMPLE_TRY_CATCH);
4649 return gimple_seq_alloc_with_stmt (g);
4652 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
4654 static tree
4655 vec2chain (vec<tree, va_gc> *v)
4657 tree chain = NULL_TREE, t;
4658 unsigned ix;
4660 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4662 DECL_CHAIN (t) = chain;
4663 chain = t;
4666 return chain;
4670 /* Remove barriers in REGION->EXIT's block. Note that this is only
4671 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4672 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
4673 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
4674 removed. */
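/* E.g. in
     #pragma omp parallel
     { #pragma omp for ... }
   the loop's implicit barrier is immediately followed by the
   parallel's own join barrier, so the inner GIMPLE_OMP_RETURN can
   usually be marked nowait (modulo the task caveat below).  */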
4676 static void
4677 remove_exit_barrier (struct omp_region *region)
4679 gimple_stmt_iterator gsi;
4680 basic_block exit_bb;
4681 edge_iterator ei;
4682 edge e;
4683 gimple stmt;
4684 int any_addressable_vars = -1;
4686 exit_bb = region->exit;
4688 /* If the parallel region doesn't return, we don't have a REGION->EXIT
4689 block at all. */
4690 if (! exit_bb)
4691 return;
4693 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4694 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4695 statements that can appear in between are extremely limited -- no
4696 memory operations at all. Here, we allow nothing at all, so the
4697 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4698 gsi = gsi_last_bb (exit_bb);
4699 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4700 gsi_prev (&gsi);
4701 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4702 return;
4704 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4706 gsi = gsi_last_bb (e->src);
4707 if (gsi_end_p (gsi))
4708 continue;
4709 stmt = gsi_stmt (gsi);
4710 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4711 && !gimple_omp_return_nowait_p (stmt))
4713 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4714 in many cases. If there could be tasks queued, the barrier
4715 might be needed to let the tasks run before some local
4716 variable of the parallel that the task uses as shared
4717 runs out of scope. The task can be spawned either
4718 from within the current function (this would be easy to check)
4719 or from some function it calls, which gets passed the address
4720 of such a variable. */
4721 if (any_addressable_vars < 0)
4723 gimple_omp_parallel parallel_stmt =
4724 as_a <gimple_omp_parallel> (last_stmt (region->entry));
4725 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4726 tree local_decls, block, decl;
4727 unsigned ix;
4729 any_addressable_vars = 0;
4730 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4731 if (TREE_ADDRESSABLE (decl))
4733 any_addressable_vars = 1;
4734 break;
4736 for (block = gimple_block (stmt);
4737 !any_addressable_vars
4738 && block
4739 && TREE_CODE (block) == BLOCK;
4740 block = BLOCK_SUPERCONTEXT (block))
4742 for (local_decls = BLOCK_VARS (block);
4743 local_decls;
4744 local_decls = DECL_CHAIN (local_decls))
4745 if (TREE_ADDRESSABLE (local_decls))
4747 any_addressable_vars = 1;
4748 break;
4750 if (block == gimple_block (parallel_stmt))
4751 break;
4754 if (!any_addressable_vars)
4755 gimple_omp_return_set_nowait (stmt);
4760 static void
4761 remove_exit_barriers (struct omp_region *region)
4763 if (region->type == GIMPLE_OMP_PARALLEL)
4764 remove_exit_barrier (region);
4766 if (region->inner)
4768 region = region->inner;
4769 remove_exit_barriers (region);
4770 while (region->next)
4772 region = region->next;
4773 remove_exit_barriers (region);
4778 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4779 calls. These can't be declared as const functions, but
4780 within one parallel body they are constant, so they can be
4781 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4782 which are declared const. Similarly for task body, except
4783 that in untied task omp_get_thread_num () can change at any task
4784 scheduling point. */
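/* E.g. within one parallel body
     n = omp_get_num_threads ();
   can be redirected to the const variant
     n = __builtin_omp_get_num_threads ();
   so that repeated calls can be CSEd.  */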
4786 static void
4787 optimize_omp_library_calls (gimple entry_stmt)
4789 basic_block bb;
4790 gimple_stmt_iterator gsi;
4791 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4792 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4793 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4794 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4795 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4796 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4797 OMP_CLAUSE_UNTIED) != NULL);
4799 FOR_EACH_BB_FN (bb, cfun)
4800 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4802 gimple call = gsi_stmt (gsi);
4803 tree decl;
4805 if (is_gimple_call (call)
4806 && (decl = gimple_call_fndecl (call))
4807 && DECL_EXTERNAL (decl)
4808 && TREE_PUBLIC (decl)
4809 && DECL_INITIAL (decl) == NULL)
4811 tree built_in;
4813 if (DECL_NAME (decl) == thr_num_id)
4815 /* In #pragma omp task untied, omp_get_thread_num () can change
4816 during the execution of the task region. */
4817 if (untied_task)
4818 continue;
4819 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4821 else if (DECL_NAME (decl) == num_thr_id)
4822 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4823 else
4824 continue;
4826 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4827 || gimple_call_num_args (call) != 0)
4828 continue;
4830 if (flag_exceptions && !TREE_NOTHROW (decl))
4831 continue;
4833 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4834 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4835 TREE_TYPE (TREE_TYPE (built_in))))
4836 continue;
4838 gimple_call_set_fndecl (call, built_in);
4843 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4844 regimplified. */
4846 static tree
4847 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4849 tree t = *tp;
4851 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4852 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4853 return t;
4855 if (TREE_CODE (t) == ADDR_EXPR)
4856 recompute_tree_invariant_for_addr_expr (t);
4858 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4859 return NULL_TREE;
4862 /* Prepend TO = FROM assignment before *GSI_P. */
4864 static void
4865 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4867 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4868 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4869 true, GSI_SAME_STMT);
4870 gimple stmt = gimple_build_assign (to, from);
4871 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4872 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4873 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4875 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4876 gimple_regimplify_operands (stmt, &gsi);
4880 /* Expand the OpenMP parallel or task directive starting at REGION. */
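/* In outline: patch the .OMP_DATA_I = &.OMP_DATA_O copy-in statement
   to use CHILD_FN's argument, move the region's blocks into CHILD_FN's
   CFG (move_sese_region_to_fn), register CHILD_FN with the callgraph,
   and finally emit the GOMP_parallel / GOMP_task (or Cilk) launch call
   in place of the directive.  */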
4882 static void
4883 expand_omp_taskreg (struct omp_region *region)
4885 basic_block entry_bb, exit_bb, new_bb;
4886 struct function *child_cfun;
4887 tree child_fn, block, t;
4888 gimple_stmt_iterator gsi;
4889 gimple entry_stmt, stmt;
4890 edge e;
4891 vec<tree, va_gc> *ws_args;
4893 entry_stmt = last_stmt (region->entry);
4894 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4895 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4897 entry_bb = region->entry;
4898 exit_bb = region->exit;
4900 bool is_cilk_for
4901 = (flag_cilkplus
4902 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
4903 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
4904 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
4906 if (is_cilk_for)
4907 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
4908 and the inner statement contains the name of the built-in function
4909 and grain. */
4910 ws_args = region->inner->ws_args;
4911 else if (is_combined_parallel (region))
4912 ws_args = region->ws_args;
4913 else
4914 ws_args = NULL;
4916 if (child_cfun->cfg)
4918 /* Due to inlining, it may happen that we have already outlined
4919 the region, in which case all we need to do is make the
4920 sub-graph unreachable and emit the parallel call. */
4921 edge entry_succ_e, exit_succ_e;
4923 entry_succ_e = single_succ_edge (entry_bb);
4925 gsi = gsi_last_bb (entry_bb);
4926 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4927 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4928 gsi_remove (&gsi, true);
4930 new_bb = entry_bb;
4931 if (exit_bb)
4933 exit_succ_e = single_succ_edge (exit_bb);
4934 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4936 remove_edge_and_dominated_blocks (entry_succ_e);
4938 else
4940 unsigned srcidx, dstidx, num;
4942 /* If the parallel region needs data sent from the parent
4943 function, then the very first statement (except possible
4944 tree profile counter updates) of the parallel body
4945 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4946 &.OMP_DATA_O is passed as an argument to the child function,
4947 we need to replace it with the argument as seen by the child
4948 function.
4950 In most cases, this will end up being the identity assignment
4951 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4952 a function call that has been inlined, the original PARM_DECL
4953 .OMP_DATA_I may have been converted into a different local
4954 variable, in which case we need to keep the assignment. */
4955 if (gimple_omp_taskreg_data_arg (entry_stmt))
4957 basic_block entry_succ_bb = single_succ (entry_bb);
4958 tree arg, narg;
4959 gimple parcopy_stmt = NULL;
4961 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4963 gimple stmt;
4965 gcc_assert (!gsi_end_p (gsi));
4966 stmt = gsi_stmt (gsi);
4967 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4968 continue;
4970 if (gimple_num_ops (stmt) == 2)
4972 tree arg = gimple_assign_rhs1 (stmt);
4974 /* We ignore the subcode because we're
4975 effectively doing a STRIP_NOPS. */
4977 if (TREE_CODE (arg) == ADDR_EXPR
4978 && TREE_OPERAND (arg, 0)
4979 == gimple_omp_taskreg_data_arg (entry_stmt))
4981 parcopy_stmt = stmt;
4982 break;
4987 gcc_assert (parcopy_stmt != NULL);
4988 arg = DECL_ARGUMENTS (child_fn);
4990 if (!gimple_in_ssa_p (cfun))
4992 if (gimple_assign_lhs (parcopy_stmt) == arg)
4993 gsi_remove (&gsi, true);
4994 else
4996 /* ?? Is setting the subcode really necessary ?? */
4997 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4998 gimple_assign_set_rhs1 (parcopy_stmt, arg);
5001 else
5003 /* If we are in ssa form, we must load the value from the default
5004 definition of the argument. That should not be defined now,
5005 since the argument is not used uninitialized. */
5006 gcc_assert (ssa_default_def (cfun, arg) == NULL);
5007 narg = make_ssa_name (arg, gimple_build_nop ());
5008 set_ssa_default_def (cfun, arg, narg);
5009 /* ?? Is setting the subcode really necessary ?? */
5010 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
5011 gimple_assign_set_rhs1 (parcopy_stmt, narg);
5012 update_stmt (parcopy_stmt);
5016 /* Declare local variables needed in CHILD_CFUN. */
5017 block = DECL_INITIAL (child_fn);
5018 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
5019 /* The gimplifier could record temporaries in the parallel/task block
5020 rather than in the containing function's local_decls chain,
5021 which would mean cgraph missed finalizing them. Do it now. */
5022 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
5023 if (TREE_CODE (t) == VAR_DECL
5024 && TREE_STATIC (t)
5025 && !DECL_EXTERNAL (t))
5026 varpool_node::finalize_decl (t);
5027 DECL_SAVED_TREE (child_fn) = NULL;
5028 /* We'll create a CFG for child_fn, so no gimple body is needed. */
5029 gimple_set_body (child_fn, NULL);
5030 TREE_USED (block) = 1;
5032 /* Reset DECL_CONTEXT on function arguments. */
5033 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
5034 DECL_CONTEXT (t) = child_fn;
5036 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
5037 so that it can be moved to the child function. */
5038 gsi = gsi_last_bb (entry_bb);
5039 stmt = gsi_stmt (gsi);
5040 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
5041 || gimple_code (stmt) == GIMPLE_OMP_TASK));
5042 gsi_remove (&gsi, true);
5043 e = split_block (entry_bb, stmt);
5044 entry_bb = e->dest;
5045 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5047 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5048 if (exit_bb)
5050 gsi = gsi_last_bb (exit_bb);
5051 gcc_assert (!gsi_end_p (gsi)
5052 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5053 stmt = gimple_build_return (NULL);
5054 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5055 gsi_remove (&gsi, true);
5058 /* Move the parallel region into CHILD_CFUN. */
5060 if (gimple_in_ssa_p (cfun))
5062 init_tree_ssa (child_cfun);
5063 init_ssa_operands (child_cfun);
5064 child_cfun->gimple_df->in_ssa_p = true;
5065 block = NULL_TREE;
5067 else
5068 block = gimple_block (entry_stmt);
5070 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
5071 if (exit_bb)
5072 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
5073 /* When the OMP expansion process cannot guarantee an up-to-date
5074 loop tree arrange for the child function to fixup loops. */
5075 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5076 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
5078 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5079 num = vec_safe_length (child_cfun->local_decls);
5080 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
5082 t = (*child_cfun->local_decls)[srcidx];
5083 if (DECL_CONTEXT (t) == cfun->decl)
5084 continue;
5085 if (srcidx != dstidx)
5086 (*child_cfun->local_decls)[dstidx] = t;
5087 dstidx++;
5089 if (dstidx != num)
5090 vec_safe_truncate (child_cfun->local_decls, dstidx);
5092 /* Inform the callgraph about the new function. */
5093 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
5094 cgraph_node::add_new_function (child_fn, true);
5096 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5097 fixed in a following pass. */
5098 push_cfun (child_cfun);
5099 if (optimize)
5100 optimize_omp_library_calls (entry_stmt);
5101 cgraph_edge::rebuild_edges ();
5103 /* Some EH regions might become dead; see PR34608. If
5104 pass_cleanup_cfg isn't the first pass to happen with the
5105 new child, these dead EH edges might cause problems.
5106 Clean them up now. */
5107 if (flag_exceptions)
5109 basic_block bb;
5110 bool changed = false;
5112 FOR_EACH_BB_FN (bb, cfun)
5113 changed |= gimple_purge_dead_eh_edges (bb);
5114 if (changed)
5115 cleanup_tree_cfg ();
5117 if (gimple_in_ssa_p (cfun))
5118 update_ssa (TODO_update_ssa);
5119 pop_cfun ();
5122 /* Emit a library call to launch the children threads. */
5123 if (is_cilk_for)
5124 expand_cilk_for_call (new_bb,
5125 as_a <gimple_omp_parallel> (entry_stmt), ws_args);
5126 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5127 expand_parallel_call (region, new_bb,
5128 as_a <gimple_omp_parallel> (entry_stmt), ws_args);
5129 else
5130 expand_task_call (new_bb, as_a <gimple_omp_task> (entry_stmt));
5131 if (gimple_in_ssa_p (cfun))
5132 update_ssa (TODO_update_ssa_only_virtuals);
5136 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5137 of the combined collapse > 1 loop constructs, generate code like:
5138 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5139 if (cond3 is <)
5140 adj = STEP3 - 1;
5141 else
5142 adj = STEP3 + 1;
5143 count3 = (adj + N32 - N31) / STEP3;
5144 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5145 if (cond2 is <)
5146 adj = STEP2 - 1;
5147 else
5148 adj = STEP2 + 1;
5149 count2 = (adj + N22 - N21) / STEP2;
5150 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5151 if (cond1 is <)
5152 adj = STEP1 - 1;
5153 else
5154 adj = STEP1 + 1;
5155 count1 = (adj + N12 - N11) / STEP1;
5156 count = count1 * count2 * count3;
5157 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5158 count = 0;
5159 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5160 of the combined loop constructs, just initialize COUNTS array
5161 from the _looptemp_ clauses. */
5163 /* NOTE: It *could* be better to moosh all of the BBs together,
5164 creating one larger BB with all the computation and the unexpected
5165 jump at the end. I.e.
5167 bool zero3, zero2, zero1, zero;
5169 zero3 = N32 c3 N31;
5170 count3 = (N32 - N31) /[cl] STEP3;
5171 zero2 = N22 c2 N21;
5172 count2 = (N22 - N21) /[cl] STEP2;
5173 zero1 = N12 c1 N11;
5174 count1 = (N12 - N11) /[cl] STEP1;
5175 zero = zero3 || zero2 || zero1;
5176 count = count1 * count2 * count3;
5177 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5179 After all, we expect the zero=false, and thus we expect to have to
5180 evaluate all of the comparison expressions, so short-circuiting
5181 oughtn't be a win. Since the condition isn't protecting a
5182 denominator, we're not concerned about divide-by-zero, so we can
5183 fully evaluate count even if a numerator turned out to be wrong.
5185 It seems like putting this all together would create much better
5186 scheduling opportunities, and less pressure on the chip's branch
5187 predictor. */
5189 static void
5190 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5191 basic_block &entry_bb, tree *counts,
5192 basic_block &zero_iter_bb, int &first_zero_iter,
5193 basic_block &l2_dom_bb)
5195 tree t, type = TREE_TYPE (fd->loop.v);
5196 edge e, ne;
5197 int i;
5199 /* Collapsed loops need work for expansion into SSA form. */
5200 gcc_assert (!gimple_in_ssa_p (cfun));
5202 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5203 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5205 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5206 isn't supposed to be handled, as the inner loop doesn't
5207 use it. */
5208 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5209 OMP_CLAUSE__LOOPTEMP_);
5210 gcc_assert (innerc);
5211 for (i = 0; i < fd->collapse; i++)
5213 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5214 OMP_CLAUSE__LOOPTEMP_);
5215 gcc_assert (innerc);
5216 if (i)
5217 counts[i] = OMP_CLAUSE_DECL (innerc);
5218 else
5219 counts[0] = NULL_TREE;
5221 return;
5224 for (i = 0; i < fd->collapse; i++)
5226 tree itype = TREE_TYPE (fd->loops[i].v);
5228 if (SSA_VAR_P (fd->loop.n2)
5229 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5230 fold_convert (itype, fd->loops[i].n1),
5231 fold_convert (itype, fd->loops[i].n2)))
5232 == NULL_TREE || !integer_onep (t)))
5234 gimple_cond cond_stmt;
5235 tree n1, n2;
5236 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5237 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5238 true, GSI_SAME_STMT);
5239 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5240 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5241 true, GSI_SAME_STMT);
5242 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5243 NULL_TREE, NULL_TREE);
5244 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
5245 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
5246 expand_omp_regimplify_p, NULL, NULL)
5247 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
5248 expand_omp_regimplify_p, NULL, NULL))
5250 *gsi = gsi_for_stmt (cond_stmt);
5251 gimple_regimplify_operands (cond_stmt, gsi);
5253 e = split_block (entry_bb, cond_stmt);
5254 if (zero_iter_bb == NULL)
5256 gimple_assign assign_stmt;
5257 first_zero_iter = i;
5258 zero_iter_bb = create_empty_bb (entry_bb);
5259 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5260 *gsi = gsi_after_labels (zero_iter_bb);
5261 assign_stmt = gimple_build_assign (fd->loop.n2,
5262 build_zero_cst (type));
5263 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
5264 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5265 entry_bb);
5267 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5268 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5269 e->flags = EDGE_TRUE_VALUE;
5270 e->probability = REG_BR_PROB_BASE - ne->probability;
5271 if (l2_dom_bb == NULL)
5272 l2_dom_bb = entry_bb;
5273 entry_bb = e->dest;
5274 *gsi = gsi_last_bb (entry_bb);
5277 if (POINTER_TYPE_P (itype))
5278 itype = signed_type_for (itype);
5279 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5280 ? -1 : 1));
5281 t = fold_build2 (PLUS_EXPR, itype,
5282 fold_convert (itype, fd->loops[i].step), t);
5283 t = fold_build2 (PLUS_EXPR, itype, t,
5284 fold_convert (itype, fd->loops[i].n2));
5285 t = fold_build2 (MINUS_EXPR, itype, t,
5286 fold_convert (itype, fd->loops[i].n1));
5287 /* ?? We could probably use CEIL_DIV_EXPR instead of
5288 TRUNC_DIV_EXPR and adjust by hand, unless we can't
5289 generate the same code in the end because generically we
5290 don't know that the values involved must be negative for
5291 GT. ?? */
5292 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5293 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5294 fold_build1 (NEGATE_EXPR, itype, t),
5295 fold_build1 (NEGATE_EXPR, itype,
5296 fold_convert (itype,
5297 fd->loops[i].step)));
5298 else
5299 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5300 fold_convert (itype, fd->loops[i].step));
5301 t = fold_convert (type, t);
5302 if (TREE_CODE (t) == INTEGER_CST)
5303 counts[i] = t;
5304 else
5306 counts[i] = create_tmp_reg (type, ".count");
5307 expand_omp_build_assign (gsi, counts[i], t);
5309 if (SSA_VAR_P (fd->loop.n2))
5311 if (i == 0)
5312 t = counts[0];
5313 else
5314 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5315 expand_omp_build_assign (gsi, fd->loop.n2, t);
5321 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5322 T = V;
5323 V3 = N31 + (T % count3) * STEP3;
5324 T = T / count3;
5325 V2 = N21 + (T % count2) * STEP2;
5326 T = T / count2;
5327 V1 = N11 + T * STEP1;
5328 if this loop doesn't have an inner loop construct combined with it.
5329 If it does have an inner loop construct combined with it and the
5330 iteration count isn't known constant, store values from counts array
5331 into its _looptemp_ temporaries instead. */
5333 static void
5334 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5335 tree *counts, gimple inner_stmt, tree startvar)
5337 int i;
5338 if (gimple_omp_for_combined_p (fd->for_stmt))
5340 /* If fd->loop.n2 is constant, then no propagation of the counts
5341 is needed; they are constant. */
5342 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5343 return;
5345 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5346 ? gimple_omp_parallel_clauses (inner_stmt)
5347 : gimple_omp_for_clauses (inner_stmt);
5348 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5349 isn't supposed to be handled, as the inner loop doesn't
5350 use it. */
5351 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5352 gcc_assert (innerc);
5353 for (i = 0; i < fd->collapse; i++)
5355 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5356 OMP_CLAUSE__LOOPTEMP_);
5357 gcc_assert (innerc);
5358 if (i)
5360 tree tem = OMP_CLAUSE_DECL (innerc);
5361 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5362 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5363 false, GSI_CONTINUE_LINKING);
5364 gimple_assign stmt = gimple_build_assign (tem, t);
5365 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5368 return;
5371 tree type = TREE_TYPE (fd->loop.v);
5372 tree tem = create_tmp_reg (type, ".tem");
5373 gimple_assign stmt = gimple_build_assign (tem, startvar);
5374 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5376 for (i = fd->collapse - 1; i >= 0; i--)
5378 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5379 itype = vtype;
5380 if (POINTER_TYPE_P (vtype))
5381 itype = signed_type_for (vtype);
5382 if (i != 0)
5383 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5384 else
5385 t = tem;
5386 t = fold_convert (itype, t);
5387 t = fold_build2 (MULT_EXPR, itype, t,
5388 fold_convert (itype, fd->loops[i].step));
5389 if (POINTER_TYPE_P (vtype))
5390 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5391 else
5392 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5393 t = force_gimple_operand_gsi (gsi, t,
5394 DECL_P (fd->loops[i].v)
5395 && TREE_ADDRESSABLE (fd->loops[i].v),
5396 NULL_TREE, false,
5397 GSI_CONTINUE_LINKING);
5398 stmt = gimple_build_assign (fd->loops[i].v, t);
5399 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5400 if (i != 0)
5402 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5403 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5404 false, GSI_CONTINUE_LINKING);
5405 stmt = gimple_build_assign (tem, t);
5406 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5412 /* Helper function for expand_omp_for_*. Generate code like:
5413 L10:
5414 V3 += STEP3;
5415 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5416 L11:
5417 V3 = N31;
5418 V2 += STEP2;
5419 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5420 L12:
5421 V2 = N21;
5422 V1 += STEP1;
5423 goto BODY_BB; */
5425 static basic_block
5426 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5427 basic_block body_bb)
5429 basic_block last_bb, bb, collapse_bb = NULL;
5430 int i;
5431 gimple_stmt_iterator gsi;
5432 edge e;
5433 tree t;
5434 gimple stmt;
5436 last_bb = cont_bb;
5437 for (i = fd->collapse - 1; i >= 0; i--)
5439 tree vtype = TREE_TYPE (fd->loops[i].v);
5441 bb = create_empty_bb (last_bb);
5442 add_bb_to_loop (bb, last_bb->loop_father);
5443 gsi = gsi_start_bb (bb);
5445 if (i < fd->collapse - 1)
5447 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5448 e->probability = REG_BR_PROB_BASE / 8;
5450 t = fd->loops[i + 1].n1;
5451 t = force_gimple_operand_gsi (&gsi, t,
5452 DECL_P (fd->loops[i + 1].v)
5453 && TREE_ADDRESSABLE (fd->loops[i
5454 + 1].v),
5455 NULL_TREE, false,
5456 GSI_CONTINUE_LINKING);
5457 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5458 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5460 else
5461 collapse_bb = bb;
5463 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5465 if (POINTER_TYPE_P (vtype))
5466 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5467 else
5468 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5469 t = force_gimple_operand_gsi (&gsi, t,
5470 DECL_P (fd->loops[i].v)
5471 && TREE_ADDRESSABLE (fd->loops[i].v),
5472 NULL_TREE, false, GSI_CONTINUE_LINKING);
5473 stmt = gimple_build_assign (fd->loops[i].v, t);
5474 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5476 if (i > 0)
5478 t = fd->loops[i].n2;
5479 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5480 false, GSI_CONTINUE_LINKING);
5481 tree v = fd->loops[i].v;
5482 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5483 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5484 false, GSI_CONTINUE_LINKING);
5485 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5486 stmt = gimple_build_cond_empty (t);
5487 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5488 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5489 e->probability = REG_BR_PROB_BASE * 7 / 8;
5491 else
5492 make_edge (bb, body_bb, EDGE_FALLTHRU);
5493 last_bb = bb;
5496 return collapse_bb;
5500 /* A subroutine of expand_omp_for. Generate code for a parallel
5501 loop with any schedule. Given parameters:
5503 for (V = N1; V cond N2; V += STEP) BODY;
5505 where COND is "<" or ">", we generate pseudocode
5507 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5508 if (more) goto L0; else goto L3;
5510 V = istart0;
5511 iend = iend0;
5513 BODY;
5514 V += STEP;
5515 if (V cond iend) goto L1; else goto L2;
5517 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5520 If this is a combined omp parallel loop, instead of the call to
5521 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5522 If this is gimple_omp_for_combined_p loop, then instead of assigning
5523 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5524 inner GIMPLE_OMP_FOR and V += STEP; and
5525 if (V cond iend) goto L1; else goto L2; are removed.
5527 For collapsed loops, given parameters:
5528 collapse(3)
5529 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5530 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5531 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5532 BODY;
5534 we generate pseudocode
5536 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5537 if (cond3 is <)
5538 adj = STEP3 - 1;
5539 else
5540 adj = STEP3 + 1;
5541 count3 = (adj + N32 - N31) / STEP3;
5542 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5543 if (cond2 is <)
5544 adj = STEP2 - 1;
5545 else
5546 adj = STEP2 + 1;
5547 count2 = (adj + N22 - N21) / STEP2;
5548 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5549 if (cond1 is <)
5550 adj = STEP1 - 1;
5551 else
5552 adj = STEP1 + 1;
5553 count1 = (adj + N12 - N11) / STEP1;
5554 count = count1 * count2 * count3;
5555 goto Z1;
5557 count = 0;
5559 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5560 if (more) goto L0; else goto L3;
5562 V = istart0;
5563 T = V;
5564 V3 = N31 + (T % count3) * STEP3;
5565 T = T / count3;
5566 V2 = N21 + (T % count2) * STEP2;
5567 T = T / count2;
5568 V1 = N11 + T * STEP1;
5569 iend = iend0;
5571 BODY;
5572 V += 1;
5573 if (V < iend) goto L10; else goto L2;
5574 L10:
5575 V3 += STEP3;
5576 if (V3 cond3 N32) goto L1; else goto L11;
5577 L11:
5578 V3 = N31;
5579 V2 += STEP2;
5580 if (V2 cond2 N22) goto L1; else goto L12;
5581 L12:
5582 V2 = N21;
5583 V1 += STEP1;
5584 goto L1;
5586 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5591 static void
5592 expand_omp_for_generic (struct omp_region *region,
5593 struct omp_for_data *fd,
5594 enum built_in_function start_fn,
5595 enum built_in_function next_fn,
5596 gimple inner_stmt)
5598 tree type, istart0, iend0, iend;
5599 tree t, vmain, vback, bias = NULL_TREE;
5600 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5601 basic_block l2_bb = NULL, l3_bb = NULL;
5602 gimple_stmt_iterator gsi;
5603 gimple_assign assign_stmt;
5604 bool in_combined_parallel = is_combined_parallel (region);
5605 bool broken_loop = region->cont == NULL;
5606 edge e, ne;
5607 tree *counts = NULL;
5608 int i;
5610 gcc_assert (!broken_loop || !in_combined_parallel);
5611 gcc_assert (fd->iter_type == long_integer_type_node
5612 || !in_combined_parallel);
5614 type = TREE_TYPE (fd->loop.v);
5615 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5616 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5617 TREE_ADDRESSABLE (istart0) = 1;
5618 TREE_ADDRESSABLE (iend0) = 1;
5620 /* See if we need to bias by LLONG_MIN. */
5621 if (fd->iter_type == long_long_unsigned_type_node
5622 && TREE_CODE (type) == INTEGER_TYPE
5623 && !TYPE_UNSIGNED (type))
5625 tree n1, n2;
5627 if (fd->loop.cond_code == LT_EXPR)
5629 n1 = fd->loop.n1;
5630 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5632 else
5634 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5635 n2 = fd->loop.n1;
5637 if (TREE_CODE (n1) != INTEGER_CST
5638 || TREE_CODE (n2) != INTEGER_CST
5639 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5640 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5643 entry_bb = region->entry;
5644 cont_bb = region->cont;
5645 collapse_bb = NULL;
5646 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5647 gcc_assert (broken_loop
5648 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5649 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5650 l1_bb = single_succ (l0_bb);
5651 if (!broken_loop)
5653 l2_bb = create_empty_bb (cont_bb);
5654 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5655 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5657 else
5658 l2_bb = NULL;
5659 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5660 exit_bb = region->exit;
5662 gsi = gsi_last_bb (entry_bb);
5664 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5665 if (fd->collapse > 1)
5667 int first_zero_iter = -1;
5668 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5670 counts = XALLOCAVEC (tree, fd->collapse);
5671 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5672 zero_iter_bb, first_zero_iter,
5673 l2_dom_bb);
5675 if (zero_iter_bb)
5677 /* Some counts[i] vars might be uninitialized if
5678 some loop has zero iterations. But the body shouldn't
5679 be executed in that case, so just avoid uninit warnings. */
5680 for (i = first_zero_iter; i < fd->collapse; i++)
5681 if (SSA_VAR_P (counts[i]))
5682 TREE_NO_WARNING (counts[i]) = 1;
5683 gsi_prev (&gsi);
5684 e = split_block (entry_bb, gsi_stmt (gsi));
5685 entry_bb = e->dest;
5686 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5687 gsi = gsi_last_bb (entry_bb);
5688 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5689 get_immediate_dominator (CDI_DOMINATORS,
5690 zero_iter_bb));
5693 if (in_combined_parallel)
5695 /* In a combined parallel loop, emit a call to
5696 GOMP_loop_foo_next. */
5697 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5698 build_fold_addr_expr (istart0),
5699 build_fold_addr_expr (iend0));
5701 else
5703 tree t0, t1, t2, t3, t4;
5704 /* If this is not a combined parallel loop, emit a call to
5705 GOMP_loop_foo_start in ENTRY_BB. */
5706 t4 = build_fold_addr_expr (iend0);
5707 t3 = build_fold_addr_expr (istart0);
5708 t2 = fold_convert (fd->iter_type, fd->loop.step);
5709 t1 = fd->loop.n2;
5710 t0 = fd->loop.n1;
5711 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5713 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5714 OMP_CLAUSE__LOOPTEMP_);
5715 gcc_assert (innerc);
5716 t0 = OMP_CLAUSE_DECL (innerc);
5717 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5718 OMP_CLAUSE__LOOPTEMP_);
5719 gcc_assert (innerc);
5720 t1 = OMP_CLAUSE_DECL (innerc);
5722 if (POINTER_TYPE_P (TREE_TYPE (t0))
5723 && TYPE_PRECISION (TREE_TYPE (t0))
5724 != TYPE_PRECISION (fd->iter_type))
5726 /* Avoid casting pointers to an integer of a different size. */
5727 tree itype = signed_type_for (type);
5728 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5729 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5731 else
5733 t1 = fold_convert (fd->iter_type, t1);
5734 t0 = fold_convert (fd->iter_type, t0);
5736 if (bias)
5738 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5739 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5741 if (fd->iter_type == long_integer_type_node)
5743 if (fd->chunk_size)
5745 t = fold_convert (fd->iter_type, fd->chunk_size);
5746 t = build_call_expr (builtin_decl_explicit (start_fn),
5747 6, t0, t1, t2, t, t3, t4);
5749 else
5750 t = build_call_expr (builtin_decl_explicit (start_fn),
5751 5, t0, t1, t2, t3, t4);
5753 else
5755 tree t5;
5756 tree c_bool_type;
5757 tree bfn_decl;
5759 /* The GOMP_loop_ull_*start functions have an additional boolean
5760 argument, true for < loops and false for > loops.
5761 In Fortran, the C bool type can be different from
5762 boolean_type_node. */
5763 bfn_decl = builtin_decl_explicit (start_fn);
5764 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5765 t5 = build_int_cst (c_bool_type,
5766 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5767 if (fd->chunk_size)
5769 tree bfn_decl = builtin_decl_explicit (start_fn);
5770 t = fold_convert (fd->iter_type, fd->chunk_size);
5771 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5773 else
5774 t = build_call_expr (builtin_decl_explicit (start_fn),
5775 6, t5, t0, t1, t2, t3, t4);
5778 if (TREE_TYPE (t) != boolean_type_node)
5779 t = fold_build2 (NE_EXPR, boolean_type_node,
5780 t, build_int_cst (TREE_TYPE (t), 0));
5781 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5782 true, GSI_SAME_STMT);
5783 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5785 /* Remove the GIMPLE_OMP_FOR statement. */
5786 gsi_remove (&gsi, true);
5788 /* Iteration setup for sequential loop goes in L0_BB. */
5789 tree startvar = fd->loop.v;
5790 tree endvar = NULL_TREE;
5792 if (gimple_omp_for_combined_p (fd->for_stmt))
5794 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5795 && gimple_omp_for_kind (inner_stmt)
5796 == GF_OMP_FOR_KIND_SIMD);
5797 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5798 OMP_CLAUSE__LOOPTEMP_);
5799 gcc_assert (innerc);
5800 startvar = OMP_CLAUSE_DECL (innerc);
5801 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5802 OMP_CLAUSE__LOOPTEMP_);
5803 gcc_assert (innerc);
5804 endvar = OMP_CLAUSE_DECL (innerc);
5807 gsi = gsi_start_bb (l0_bb);
5808 t = istart0;
5809 if (bias)
5810 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5811 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5812 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5813 t = fold_convert (TREE_TYPE (startvar), t);
5814 t = force_gimple_operand_gsi (&gsi, t,
5815 DECL_P (startvar)
5816 && TREE_ADDRESSABLE (startvar),
5817 NULL_TREE, false, GSI_CONTINUE_LINKING);
5818 assign_stmt = gimple_build_assign (startvar, t);
5819 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5821 t = iend0;
5822 if (bias)
5823 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5824 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5825 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5826 t = fold_convert (TREE_TYPE (startvar), t);
5827 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5828 false, GSI_CONTINUE_LINKING);
5829 if (endvar)
5831 assign_stmt = gimple_build_assign (endvar, iend);
5832 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5833 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5834 assign_stmt = gimple_build_assign (fd->loop.v, iend);
5835 else
5836 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, iend,
5837 NULL_TREE);
5838 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
5840 if (fd->collapse > 1)
5841 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5843 if (!broken_loop)
5845 /* Code to control the increment and predicate for the sequential
5846 loop goes in the CONT_BB. */
5847 gsi = gsi_last_bb (cont_bb);
5848 gimple_omp_continue cont_stmt =
5849 as_a <gimple_omp_continue> (gsi_stmt (gsi));
5850 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
5851 vmain = gimple_omp_continue_control_use (cont_stmt);
5852 vback = gimple_omp_continue_control_def (cont_stmt);
5854 if (!gimple_omp_for_combined_p (fd->for_stmt))
5856 if (POINTER_TYPE_P (type))
5857 t = fold_build_pointer_plus (vmain, fd->loop.step);
5858 else
5859 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5860 t = force_gimple_operand_gsi (&gsi, t,
5861 DECL_P (vback)
5862 && TREE_ADDRESSABLE (vback),
5863 NULL_TREE, true, GSI_SAME_STMT);
5864 assign_stmt = gimple_build_assign (vback, t);
5865 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
5867 t = build2 (fd->loop.cond_code, boolean_type_node,
5868 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5869 iend);
5870 gimple_cond cond_stmt = gimple_build_cond_empty (t);
5871 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
5874 /* Remove GIMPLE_OMP_CONTINUE. */
5875 gsi_remove (&gsi, true);
5877 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5878 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5880 /* Emit code to get the next parallel iteration in L2_BB. */
5881 gsi = gsi_start_bb (l2_bb);
5883 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5884 build_fold_addr_expr (istart0),
5885 build_fold_addr_expr (iend0));
5886 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5887 false, GSI_CONTINUE_LINKING);
5888 if (TREE_TYPE (t) != boolean_type_node)
5889 t = fold_build2 (NE_EXPR, boolean_type_node,
5890 t, build_int_cst (TREE_TYPE (t), 0));
5891 gimple_cond cond_stmt = gimple_build_cond_empty (t);
5892 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
5895 /* Add the loop cleanup function. */
5896 gsi = gsi_last_bb (exit_bb);
5897 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5898 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5899 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5900 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5901 else
5902 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5903 gimple_call call_stmt = gimple_build_call (t, 0);
5904 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5905 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5906 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
5907 gsi_remove (&gsi, true);
5909 /* Connect the new blocks. */
5910 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5911 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5913 if (!broken_loop)
5915 gimple_seq phis;
5917 e = find_edge (cont_bb, l3_bb);
5918 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5920 phis = phi_nodes (l3_bb);
5921 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5923 gimple phi = gsi_stmt (gsi);
5924 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5925 PHI_ARG_DEF_FROM_EDGE (phi, e));
5927 remove_edge (e);
5929 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5930 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5931 e = find_edge (cont_bb, l1_bb);
5932 if (gimple_omp_for_combined_p (fd->for_stmt))
5934 remove_edge (e);
5935 e = NULL;
5937 else if (fd->collapse > 1)
5939 remove_edge (e);
5940 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5942 else
5943 e->flags = EDGE_TRUE_VALUE;
5944 if (e)
5946 e->probability = REG_BR_PROB_BASE * 7 / 8;
5947 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5949 else
5951 e = find_edge (cont_bb, l2_bb);
5952 e->flags = EDGE_FALLTHRU;
5954 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5956 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5957 recompute_dominator (CDI_DOMINATORS, l2_bb));
5958 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5959 recompute_dominator (CDI_DOMINATORS, l3_bb));
5960 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5961 recompute_dominator (CDI_DOMINATORS, l0_bb));
5962 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5963 recompute_dominator (CDI_DOMINATORS, l1_bb));
5965 struct loop *outer_loop = alloc_loop ();
5966 outer_loop->header = l0_bb;
5967 outer_loop->latch = l2_bb;
5968 add_loop (outer_loop, l0_bb->loop_father);
5970 if (!gimple_omp_for_combined_p (fd->for_stmt))
5972 struct loop *loop = alloc_loop ();
5973 loop->header = l1_bb;
5974 /* The loop may have multiple latches. */
5975 add_loop (loop, outer_loop);
5981 /* A subroutine of expand_omp_for. Generate code for a parallel
5982 loop with static schedule and no specified chunk size. Given
5983 parameters:
5985 for (V = N1; V cond N2; V += STEP) BODY;
5987 where COND is "<" or ">", we generate pseudocode
5989 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5990 if (cond is <)
5991 adj = STEP - 1;
5992 else
5993 adj = STEP + 1;
5994 if ((__typeof (V)) -1 > 0 && cond is >)
5995 n = -(adj + N2 - N1) / -STEP;
5996 else
5997 n = (adj + N2 - N1) / STEP;
5998 q = n / nthreads;
5999 tt = n % nthreads;
6000 if (threadid < tt) goto L3; else goto L4;
6001 L3:
6002 tt = 0;
6003 q = q + 1;
6004 L4:
6005 s0 = q * threadid + tt;
6006 e0 = s0 + q;
6007 V = s0 * STEP + N1;
6008 if (s0 >= e0) goto L2; else goto L0;
6009 L0:
6010 e = e0 * STEP + N1;
6011 L1:
6012 BODY;
6013 V += STEP;
6014 if (V cond e) goto L1;
6015 L2:
6016 */
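/* A minimal, self-contained sketch of the partitioning above
   (hypothetical names, not GCC code): N iterations are split so that
   the first N % NTHREADS threads each receive one extra iteration.  */

static void
static_nochunk_range (long n, long nthreads, long threadid,
                      long *s0, long *e0)
{
  long q = n / nthreads;        /* base share per thread */
  long tt = n % nthreads;       /* number of threads with one extra */
  if (threadid < tt)
    {
      tt = 0;
      q = q + 1;
    }
  *s0 = q * threadid + tt;      /* first iteration of this thread */
  *e0 = *s0 + q;                /* one past the last iteration */
}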
6018 static void
6019 expand_omp_for_static_nochunk (struct omp_region *region,
6020 struct omp_for_data *fd,
6021 gimple inner_stmt)
6023 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
6024 tree type, itype, vmain, vback;
6025 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
6026 basic_block body_bb, cont_bb, collapse_bb = NULL;
6027 basic_block fin_bb;
6028 gimple_stmt_iterator gsi;
6029 edge ep;
6030 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6031 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6032 bool broken_loop = region->cont == NULL;
6033 tree *counts = NULL;
6034 tree n1, n2, step;
6036 itype = type = TREE_TYPE (fd->loop.v);
6037 if (POINTER_TYPE_P (type))
6038 itype = signed_type_for (type);
6040 entry_bb = region->entry;
6041 cont_bb = region->cont;
6042 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6043 fin_bb = BRANCH_EDGE (entry_bb)->dest;
6044 gcc_assert (broken_loop
6045 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
6046 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6047 body_bb = single_succ (seq_start_bb);
6048 if (!broken_loop)
6050 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6051 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6053 exit_bb = region->exit;
6055 /* Iteration space partitioning goes in ENTRY_BB. */
6056 gsi = gsi_last_bb (entry_bb);
6057 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6059 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6061 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6062 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6065 if (fd->collapse > 1)
6067 int first_zero_iter = -1;
6068 basic_block l2_dom_bb = NULL;
6070 counts = XALLOCAVEC (tree, fd->collapse);
6071 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6072 fin_bb, first_zero_iter,
6073 l2_dom_bb);
6074 t = NULL_TREE;
6076 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6077 t = integer_one_node;
6078 else
6079 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6080 fold_convert (type, fd->loop.n1),
6081 fold_convert (type, fd->loop.n2));
6082 if (fd->collapse == 1
6083 && TYPE_UNSIGNED (type)
6084 && (t == NULL_TREE || !integer_onep (t)))
6086 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6087 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6088 true, GSI_SAME_STMT);
6089 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6090 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6091 true, GSI_SAME_STMT);
6092 gimple_cond cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6093 NULL_TREE, NULL_TREE);
6094 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6095 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6096 expand_omp_regimplify_p, NULL, NULL)
6097 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6098 expand_omp_regimplify_p, NULL, NULL))
6100 gsi = gsi_for_stmt (cond_stmt);
6101 gimple_regimplify_operands (cond_stmt, &gsi);
6103 ep = split_block (entry_bb, cond_stmt);
6104 ep->flags = EDGE_TRUE_VALUE;
6105 entry_bb = ep->dest;
6106 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6107 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
6108 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
6109 if (gimple_in_ssa_p (cfun))
6111 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6112 for (gimple_phi_iterator gpi = gsi_start_phis (fin_bb);
6113 !gsi_end_p (gpi); gsi_next (&gpi))
6115 gimple_phi phi = gpi.phi ();
6116 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6117 ep, UNKNOWN_LOCATION);
6120 gsi = gsi_last_bb (entry_bb);
6123 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6124 t = fold_convert (itype, t);
6125 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6126 true, GSI_SAME_STMT);
6128 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6129 t = fold_convert (itype, t);
6130 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6131 true, GSI_SAME_STMT);
6133 n1 = fd->loop.n1;
6134 n2 = fd->loop.n2;
6135 step = fd->loop.step;
6136 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6138 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6139 OMP_CLAUSE__LOOPTEMP_);
6140 gcc_assert (innerc);
6141 n1 = OMP_CLAUSE_DECL (innerc);
6142 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6143 OMP_CLAUSE__LOOPTEMP_);
6144 gcc_assert (innerc);
6145 n2 = OMP_CLAUSE_DECL (innerc);
6147 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6148 true, NULL_TREE, true, GSI_SAME_STMT);
6149 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6150 true, NULL_TREE, true, GSI_SAME_STMT);
6151 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6152 true, NULL_TREE, true, GSI_SAME_STMT);
6154 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6155 t = fold_build2 (PLUS_EXPR, itype, step, t);
6156 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6157 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6158 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6159 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6160 fold_build1 (NEGATE_EXPR, itype, t),
6161 fold_build1 (NEGATE_EXPR, itype, step));
6162 else
6163 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6164 t = fold_convert (itype, t);
6165 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
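/* For example (a sketch, not from the source): for (V = 0; V < 10;
   V += 3) has cond_code == LT_EXPR, so the code above computes
   n = (STEP - 1 + N2 - N1) / STEP = (2 + 10 - 0) / 3 = 4,
   matching the four iterations V = 0, 3, 6, 9.  */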
6167 q = create_tmp_reg (itype, "q");
6168 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
6169 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6170 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
6172 tt = create_tmp_reg (itype, "tt");
6173 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
6174 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6175 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
6177 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
6178 gimple_cond cond_stmt = gimple_build_cond_empty (t);
6179 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6181 second_bb = split_block (entry_bb, cond_stmt)->dest;
6182 gsi = gsi_last_bb (second_bb);
6183 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6185 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
6186 GSI_SAME_STMT);
6187 gimple_assign assign_stmt =
6188 gimple_build_assign_with_ops (PLUS_EXPR, q, q,
6189 build_int_cst (itype, 1));
6190 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6192 third_bb = split_block (second_bb, assign_stmt)->dest;
6193 gsi = gsi_last_bb (third_bb);
6194 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6196 t = build2 (MULT_EXPR, itype, q, threadid);
6197 t = build2 (PLUS_EXPR, itype, t, tt);
6198 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6200 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6201 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6203 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6204 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6206 /* Remove the GIMPLE_OMP_FOR statement. */
6207 gsi_remove (&gsi, true);
6209 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6210 gsi = gsi_start_bb (seq_start_bb);
6212 tree startvar = fd->loop.v;
6213 tree endvar = NULL_TREE;
6215 if (gimple_omp_for_combined_p (fd->for_stmt))
6217 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6218 ? gimple_omp_parallel_clauses (inner_stmt)
6219 : gimple_omp_for_clauses (inner_stmt);
6220 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6221 gcc_assert (innerc);
6222 startvar = OMP_CLAUSE_DECL (innerc);
6223 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6224 OMP_CLAUSE__LOOPTEMP_);
6225 gcc_assert (innerc);
6226 endvar = OMP_CLAUSE_DECL (innerc);
6228 t = fold_convert (itype, s0);
6229 t = fold_build2 (MULT_EXPR, itype, t, step);
6230 if (POINTER_TYPE_P (type))
6231 t = fold_build_pointer_plus (n1, t);
6232 else
6233 t = fold_build2 (PLUS_EXPR, type, t, n1);
6234 t = fold_convert (TREE_TYPE (startvar), t);
6235 t = force_gimple_operand_gsi (&gsi, t,
6236 DECL_P (startvar)
6237 && TREE_ADDRESSABLE (startvar),
6238 NULL_TREE, false, GSI_CONTINUE_LINKING);
6239 assign_stmt = gimple_build_assign (startvar, t);
6240 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6242 t = fold_convert (itype, e0);
6243 t = fold_build2 (MULT_EXPR, itype, t, step);
6244 if (POINTER_TYPE_P (type))
6245 t = fold_build_pointer_plus (n1, t);
6246 else
6247 t = fold_build2 (PLUS_EXPR, type, t, n1);
6248 t = fold_convert (TREE_TYPE (startvar), t);
6249 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6250 false, GSI_CONTINUE_LINKING);
6251 if (endvar)
6253 assign_stmt = gimple_build_assign (endvar, e);
6254 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6255 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6256 assign_stmt = gimple_build_assign (fd->loop.v, e);
6257 else
6258 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6259 NULL_TREE);
6260 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6262 if (fd->collapse > 1)
6263 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6265 if (!broken_loop)
6267 /* The code controlling the sequential loop replaces the
6268 GIMPLE_OMP_CONTINUE. */
6269 gsi = gsi_last_bb (cont_bb);
6270 gimple_omp_continue cont_stmt =
6271 as_a <gimple_omp_continue> (gsi_stmt (gsi));
6272 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
6273 vmain = gimple_omp_continue_control_use (cont_stmt);
6274 vback = gimple_omp_continue_control_def (cont_stmt);
6276 if (!gimple_omp_for_combined_p (fd->for_stmt))
6278 if (POINTER_TYPE_P (type))
6279 t = fold_build_pointer_plus (vmain, step);
6280 else
6281 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6282 t = force_gimple_operand_gsi (&gsi, t,
6283 DECL_P (vback)
6284 && TREE_ADDRESSABLE (vback),
6285 NULL_TREE, true, GSI_SAME_STMT);
6286 assign_stmt = gimple_build_assign (vback, t);
6287 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6289 t = build2 (fd->loop.cond_code, boolean_type_node,
6290 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6291 ? t : vback, e);
6292 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6295 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6296 gsi_remove (&gsi, true);
6298 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6299 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6302 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6303 gsi = gsi_last_bb (exit_bb);
6304 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6306 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6307 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6309 gsi_remove (&gsi, true);
6311 /* Connect all the blocks. */
6312 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6313 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6314 ep = find_edge (entry_bb, second_bb);
6315 ep->flags = EDGE_TRUE_VALUE;
6316 ep->probability = REG_BR_PROB_BASE / 4;
6317 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6318 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6320 if (!broken_loop)
6322 ep = find_edge (cont_bb, body_bb);
6323 if (gimple_omp_for_combined_p (fd->for_stmt))
6325 remove_edge (ep);
6326 ep = NULL;
6328 else if (fd->collapse > 1)
6330 remove_edge (ep);
6331 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6333 else
6334 ep->flags = EDGE_TRUE_VALUE;
6335 find_edge (cont_bb, fin_bb)->flags
6336 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6339 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6340 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6341 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6343 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6344 recompute_dominator (CDI_DOMINATORS, body_bb));
6345 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6346 recompute_dominator (CDI_DOMINATORS, fin_bb));
6348 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6350 struct loop *loop = alloc_loop ();
6351 loop->header = body_bb;
6352 if (collapse_bb == NULL)
6353 loop->latch = cont_bb;
6354 add_loop (loop, body_bb->loop_father);
6359 /* A subroutine of expand_omp_for. Generate code for a parallel
6360 loop with static schedule and a specified chunk size. Given
6361 parameters:
6363 for (V = N1; V cond N2; V += STEP) BODY;
6365 where COND is "<" or ">", we generate pseudocode
6367 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6368 if (cond is <)
6369 adj = STEP - 1;
6370 else
6371 adj = STEP + 1;
6372 if ((__typeof (V)) -1 > 0 && cond is >)
6373 n = -(adj + N2 - N1) / -STEP;
6374 else
6375 n = (adj + N2 - N1) / STEP;
6376 trip = 0;
6377 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6378 here so that V is defined
6379 if the loop is not entered
6380 L0:
6381 s0 = (trip * nthreads + threadid) * CHUNK;
6382 e0 = min(s0 + CHUNK, n);
6383 if (s0 < n) goto L1; else goto L4;
6384 L1:
6385 V = s0 * STEP + N1;
6386 e = e0 * STEP + N1;
6387 L2:
6388 BODY;
6389 V += STEP;
6390 if (V cond e) goto L2; else goto L3;
6391 L3:
6392 trip += 1;
6393 goto L0;
6394 L4:
6395 */
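/* A minimal sketch of the chunked partitioning above (hypothetical
   names, not GCC code): on each trip a thread claims the chunk that
   starts at (trip * nthreads + threadid) * CHUNK, clamped to the
   total iteration count N.  */

static int
static_chunk_range (long n, long nthreads, long threadid, long chunk,
                    long trip, long *s0, long *e0)
{
  *s0 = (trip * nthreads + threadid) * chunk;
  *e0 = (*s0 + chunk < n) ? *s0 + chunk : n;  /* min (s0 + CHUNK, n) */
  return *s0 < n;                             /* nonzero: work to do */
}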
6397 static void
6398 expand_omp_for_static_chunk (struct omp_region *region,
6399 struct omp_for_data *fd, gimple inner_stmt)
6401 tree n, s0, e0, e, t;
6402 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6403 tree type, itype, vmain, vback, vextra;
6404 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6405 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6406 gimple_stmt_iterator gsi;
6407 edge se;
6408 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6409 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6410 bool broken_loop = region->cont == NULL;
6411 tree *counts = NULL;
6412 tree n1, n2, step;
6414 itype = type = TREE_TYPE (fd->loop.v);
6415 if (POINTER_TYPE_P (type))
6416 itype = signed_type_for (type);
6418 entry_bb = region->entry;
6419 se = split_block (entry_bb, last_stmt (entry_bb));
6420 entry_bb = se->src;
6421 iter_part_bb = se->dest;
6422 cont_bb = region->cont;
6423 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6424 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6425 gcc_assert (broken_loop
6426 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6427 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6428 body_bb = single_succ (seq_start_bb);
6429 if (!broken_loop)
6431 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6432 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6433 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6435 exit_bb = region->exit;
6437 /* Trip and adjustment setup goes in ENTRY_BB. */
6438 gsi = gsi_last_bb (entry_bb);
6439 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6441 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6443 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6444 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6447 if (fd->collapse > 1)
6449 int first_zero_iter = -1;
6450 basic_block l2_dom_bb = NULL;
6452 counts = XALLOCAVEC (tree, fd->collapse);
6453 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6454 fin_bb, first_zero_iter,
6455 l2_dom_bb);
6456 t = NULL_TREE;
6458 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6459 t = integer_one_node;
6460 else
6461 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6462 fold_convert (type, fd->loop.n1),
6463 fold_convert (type, fd->loop.n2));
6464 if (fd->collapse == 1
6465 && TYPE_UNSIGNED (type)
6466 && (t == NULL_TREE || !integer_onep (t)))
6468 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6469 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6470 true, GSI_SAME_STMT);
6471 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6472 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6473 true, GSI_SAME_STMT);
6474 gimple_cond cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6475 NULL_TREE, NULL_TREE);
6476 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6477 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6478 expand_omp_regimplify_p, NULL, NULL)
6479 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6480 expand_omp_regimplify_p, NULL, NULL))
6482 gsi = gsi_for_stmt (cond_stmt);
6483 gimple_regimplify_operands (cond_stmt, &gsi);
6485 se = split_block (entry_bb, cond_stmt);
6486 se->flags = EDGE_TRUE_VALUE;
6487 entry_bb = se->dest;
6488 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6489 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6490 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6491 if (gimple_in_ssa_p (cfun))
6493 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6494 for (gimple_phi_iterator gpi = gsi_start_phis (fin_bb);
6495 !gsi_end_p (gpi); gsi_next (&gpi))
6497 gimple_phi phi = gpi.phi ();
6498 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6499 se, UNKNOWN_LOCATION);
6502 gsi = gsi_last_bb (entry_bb);
6505 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6506 t = fold_convert (itype, t);
6507 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6508 true, GSI_SAME_STMT);
6510 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6511 t = fold_convert (itype, t);
6512 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6513 true, GSI_SAME_STMT);
6515 n1 = fd->loop.n1;
6516 n2 = fd->loop.n2;
6517 step = fd->loop.step;
6518 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6520 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6521 OMP_CLAUSE__LOOPTEMP_);
6522 gcc_assert (innerc);
6523 n1 = OMP_CLAUSE_DECL (innerc);
6524 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6525 OMP_CLAUSE__LOOPTEMP_);
6526 gcc_assert (innerc);
6527 n2 = OMP_CLAUSE_DECL (innerc);
6529 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6530 true, NULL_TREE, true, GSI_SAME_STMT);
6531 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6532 true, NULL_TREE, true, GSI_SAME_STMT);
6533 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6534 true, NULL_TREE, true, GSI_SAME_STMT);
6535 fd->chunk_size
6536 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
6537 true, NULL_TREE, true, GSI_SAME_STMT);
6539 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6540 t = fold_build2 (PLUS_EXPR, itype, step, t);
6541 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6542 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6543 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6544 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6545 fold_build1 (NEGATE_EXPR, itype, t),
6546 fold_build1 (NEGATE_EXPR, itype, step));
6547 else
6548 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6549 t = fold_convert (itype, t);
6550 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6551 true, GSI_SAME_STMT);
6553 trip_var = create_tmp_reg (itype, ".trip");
6554 if (gimple_in_ssa_p (cfun))
6556 trip_init = make_ssa_name (trip_var, NULL);
6557 trip_main = make_ssa_name (trip_var, NULL);
6558 trip_back = make_ssa_name (trip_var, NULL);
6560 else
6562 trip_init = trip_var;
6563 trip_main = trip_var;
6564 trip_back = trip_var;
6567 gimple_assign assign_stmt =
6568 gimple_build_assign (trip_init, build_int_cst (itype, 0));
6569 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6571 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6572 t = fold_build2 (MULT_EXPR, itype, t, step);
6573 if (POINTER_TYPE_P (type))
6574 t = fold_build_pointer_plus (n1, t);
6575 else
6576 t = fold_build2 (PLUS_EXPR, type, t, n1);
6577 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6578 true, GSI_SAME_STMT);
6580 /* Remove the GIMPLE_OMP_FOR. */
6581 gsi_remove (&gsi, true);
6583 /* Iteration space partitioning goes in ITER_PART_BB. */
6584 gsi = gsi_last_bb (iter_part_bb);
6586 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6587 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6588 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6589 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6590 false, GSI_CONTINUE_LINKING);
6592 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6593 t = fold_build2 (MIN_EXPR, itype, t, n);
6594 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6595 false, GSI_CONTINUE_LINKING);
6597 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6598 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6600 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6601 gsi = gsi_start_bb (seq_start_bb);
6603 tree startvar = fd->loop.v;
6604 tree endvar = NULL_TREE;
6606 if (gimple_omp_for_combined_p (fd->for_stmt))
6608 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6609 ? gimple_omp_parallel_clauses (inner_stmt)
6610 : gimple_omp_for_clauses (inner_stmt);
6611 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6612 gcc_assert (innerc);
6613 startvar = OMP_CLAUSE_DECL (innerc);
6614 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6615 OMP_CLAUSE__LOOPTEMP_);
6616 gcc_assert (innerc);
6617 endvar = OMP_CLAUSE_DECL (innerc);
6620 t = fold_convert (itype, s0);
6621 t = fold_build2 (MULT_EXPR, itype, t, step);
6622 if (POINTER_TYPE_P (type))
6623 t = fold_build_pointer_plus (n1, t);
6624 else
6625 t = fold_build2 (PLUS_EXPR, type, t, n1);
6626 t = fold_convert (TREE_TYPE (startvar), t);
6627 t = force_gimple_operand_gsi (&gsi, t,
6628 DECL_P (startvar)
6629 && TREE_ADDRESSABLE (startvar),
6630 NULL_TREE, false, GSI_CONTINUE_LINKING);
6631 assign_stmt = gimple_build_assign (startvar, t);
6632 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6634 t = fold_convert (itype, e0);
6635 t = fold_build2 (MULT_EXPR, itype, t, step);
6636 if (POINTER_TYPE_P (type))
6637 t = fold_build_pointer_plus (n1, t);
6638 else
6639 t = fold_build2 (PLUS_EXPR, type, t, n1);
6640 t = fold_convert (TREE_TYPE (startvar), t);
6641 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6642 false, GSI_CONTINUE_LINKING);
6643 if (endvar)
6645 assign_stmt = gimple_build_assign (endvar, e);
6646 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6647 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6648 assign_stmt = gimple_build_assign (fd->loop.v, e);
6649 else
6650 assign_stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6651 NULL_TREE);
6652 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6654 if (fd->collapse > 1)
6655 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6657 if (!broken_loop)
6659 /* The code controlling the sequential loop goes in CONT_BB,
6660 replacing the GIMPLE_OMP_CONTINUE. */
6661 gsi = gsi_last_bb (cont_bb);
6662 gimple_omp_continue cont_stmt =
6663 as_a <gimple_omp_continue> (gsi_stmt (gsi));
6664 vmain = gimple_omp_continue_control_use (cont_stmt);
6665 vback = gimple_omp_continue_control_def (cont_stmt);
6667 if (!gimple_omp_for_combined_p (fd->for_stmt))
6669 if (POINTER_TYPE_P (type))
6670 t = fold_build_pointer_plus (vmain, step);
6671 else
6672 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6673 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
6674 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6675 true, GSI_SAME_STMT);
6676 assign_stmt = gimple_build_assign (vback, t);
6677 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6679 t = build2 (fd->loop.cond_code, boolean_type_node,
6680 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6681 ? t : vback, e);
6682 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6685 /* Remove GIMPLE_OMP_CONTINUE. */
6686 gsi_remove (&gsi, true);
6688 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6689 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6691 /* Trip update code goes into TRIP_UPDATE_BB. */
6692 gsi = gsi_start_bb (trip_update_bb);
6694 t = build_int_cst (itype, 1);
6695 t = build2 (PLUS_EXPR, itype, trip_main, t);
6696 assign_stmt = gimple_build_assign (trip_back, t);
6697 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6700 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6701 gsi = gsi_last_bb (exit_bb);
6702 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6704 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6705 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6707 gsi_remove (&gsi, true);
6709 /* Connect the new blocks. */
6710 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6711 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6713 if (!broken_loop)
6715 se = find_edge (cont_bb, body_bb);
6716 if (gimple_omp_for_combined_p (fd->for_stmt))
6718 remove_edge (se);
6719 se = NULL;
6721 else if (fd->collapse > 1)
6723 remove_edge (se);
6724 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6726 else
6727 se->flags = EDGE_TRUE_VALUE;
6728 find_edge (cont_bb, trip_update_bb)->flags
6729 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6731 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6734 if (gimple_in_ssa_p (cfun))
6736 gimple_phi_iterator psi;
6737 gimple_phi phi;
6738 edge re, ene;
6739 edge_var_map *vm;
6740 size_t i;
6742 gcc_assert (fd->collapse == 1 && !broken_loop);
6744 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6745 remove arguments of the phi nodes in fin_bb. We need to create
6746 appropriate phi nodes in iter_part_bb instead. */
6747 se = single_pred_edge (fin_bb);
6748 re = single_succ_edge (trip_update_bb);
6749 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
6750 ene = single_succ_edge (entry_bb);
6752 psi = gsi_start_phis (fin_bb);
6753 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6754 gsi_next (&psi), ++i)
6756 gimple_phi nphi;
6757 source_location locus;
6759 phi = psi.phi ();
6760 t = gimple_phi_result (phi);
6761 gcc_assert (t == redirect_edge_var_map_result (vm));
6762 nphi = create_phi_node (t, iter_part_bb);
6764 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6765 locus = gimple_phi_arg_location_from_edge (phi, se);
6767 /* A special case -- fd->loop.v is not yet computed in
6768 iter_part_bb; we need to use vextra instead. */
6769 if (t == fd->loop.v)
6770 t = vextra;
6771 add_phi_arg (nphi, t, ene, locus);
6772 locus = redirect_edge_var_map_location (vm);
6773 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6775 gcc_assert (gsi_end_p (psi) && i == head->length ());
6776 redirect_edge_var_map_clear (re);
6777 while (1)
6779 psi = gsi_start_phis (fin_bb);
6780 if (gsi_end_p (psi))
6781 break;
6782 remove_phi_node (&psi, false);
6785 /* Make phi node for trip. */
6786 phi = create_phi_node (trip_main, iter_part_bb);
6787 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6788 UNKNOWN_LOCATION);
6789 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6790 UNKNOWN_LOCATION);
6793 if (!broken_loop)
6794 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6795 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6796 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6797 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6798 recompute_dominator (CDI_DOMINATORS, fin_bb));
6799 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6800 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6801 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6802 recompute_dominator (CDI_DOMINATORS, body_bb));
6804 if (!broken_loop)
6806 struct loop *trip_loop = alloc_loop ();
6807 trip_loop->header = iter_part_bb;
6808 trip_loop->latch = trip_update_bb;
6809 add_loop (trip_loop, iter_part_bb->loop_father);
6811 if (!gimple_omp_for_combined_p (fd->for_stmt))
6813 struct loop *loop = alloc_loop ();
6814 loop->header = body_bb;
6815 if (collapse_bb == NULL)
6816 loop->latch = cont_bb;
6817 add_loop (loop, trip_loop);
6822 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
6823 Given parameters:
6824 for (V = N1; V cond N2; V += STEP) BODY;
6826 where COND is "<" or ">" or "!=", we generate pseudocode
6828 for (ind_var = low; ind_var < high; ind_var++)
6829 {
6830 V = n1 + (ind_var * STEP)
6832 <BODY>
6833 }
6835 In the above pseudocode, low and high are function parameters of the
6836 child function. In the function below, we insert a temporary
6837 variable and calls to two OMP functions that will not otherwise be
6838 found in the body of _Cilk_for (since OMP_FOR cannot be mixed
6839 with _Cilk_for). These calls are replaced with low and high
6840 by the function that handles taskreg. */
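/* A minimal sketch of the resulting child-function loop (hypothetical
   names; the real __low/__high values are supplied by the Cilk
   runtime when it invokes the child):  */

static void
cilk_for_child_loop (long low, long high, long n1, long step)
{
  long ind_var;
  for (ind_var = low; ind_var < high; ind_var++)
    {
      long v = n1 + ind_var * step;  /* reconstruct the user's V */
      (void) v;                      /* <BODY> would use v here */
    }
}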
6843 static void
6844 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
6846 bool broken_loop = region->cont == NULL;
6847 basic_block entry_bb = region->entry;
6848 basic_block cont_bb = region->cont;
6850 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6851 gcc_assert (broken_loop
6852 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6853 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6854 basic_block l1_bb, l2_bb;
6856 if (!broken_loop)
6858 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6859 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6860 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6861 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6863 else
6865 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6866 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6867 l2_bb = single_succ (l1_bb);
6869 basic_block exit_bb = region->exit;
6870 basic_block l2_dom_bb = NULL;
6872 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
6874 /* The statements below, up to the "tree high_val = ..." line, are pseudo
6875 statements used to pass information to expand_omp_taskreg.
6876 low_val and high_val will be replaced by the __low and __high
6877 parameters of the child function.
6879 The call_exprs part is a place-holder; it is mainly used
6880 to distinctly identify to the top-level part that this is
6881 where we should put low and high (reasoning given in the header
6882 comment). */
6884 tree child_fndecl
6885 = gimple_omp_parallel_child_fn (
6886 as_a <gimple_omp_parallel> (last_stmt (region->outer->entry)));
6887 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
6888 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
6890 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
6891 high_val = t;
6892 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
6893 low_val = t;
6895 gcc_assert (low_val && high_val);
6897 tree type = TREE_TYPE (low_val);
6898 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
6899 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6901 /* Not needed in SSA form right now. */
6902 gcc_assert (!gimple_in_ssa_p (cfun));
6903 if (l2_dom_bb == NULL)
6904 l2_dom_bb = l1_bb;
6906 tree n1 = low_val;
6907 tree n2 = high_val;
6909 gimple stmt = gimple_build_assign (ind_var, n1);
6911 /* Replace the GIMPLE_OMP_FOR statement. */
6912 gsi_replace (&gsi, stmt, true);
6914 if (!broken_loop)
6916 /* Code to control the increment goes in the CONT_BB. */
6917 gsi = gsi_last_bb (cont_bb);
6918 stmt = gsi_stmt (gsi);
6919 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6920 stmt = gimple_build_assign_with_ops (PLUS_EXPR, ind_var, ind_var,
6921 build_one_cst (type));
6923 /* Replace GIMPLE_OMP_CONTINUE. */
6924 gsi_replace (&gsi, stmt, true);
6927 /* Emit the condition in L1_BB. */
6928 gsi = gsi_after_labels (l1_bb);
6929 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
6930 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
6931 fd->loop.step);
6932 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
6933 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6934 fd->loop.n1, fold_convert (sizetype, t));
6935 else
6936 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6937 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
6938 t = fold_convert (TREE_TYPE (fd->loop.v), t);
6939 expand_omp_build_assign (&gsi, fd->loop.v, t);
6941 /* The condition is always '<' since the runtime will fill in the low
6942 and high values. */
6943 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
6944 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6946 /* Remove GIMPLE_OMP_RETURN. */
6947 gsi = gsi_last_bb (exit_bb);
6948 gsi_remove (&gsi, true);
6950 /* Connect the new blocks. */
6951 remove_edge (FALLTHRU_EDGE (entry_bb));
6953 edge e, ne;
6954 if (!broken_loop)
6956 remove_edge (BRANCH_EDGE (entry_bb));
6957 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6959 e = BRANCH_EDGE (l1_bb);
6960 ne = FALLTHRU_EDGE (l1_bb);
6961 e->flags = EDGE_TRUE_VALUE;
6963 else
6965 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6967 ne = single_succ_edge (l1_bb);
6968 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6971 ne->flags = EDGE_FALSE_VALUE;
6972 e->probability = REG_BR_PROB_BASE * 7 / 8;
6973 ne->probability = REG_BR_PROB_BASE / 8;
6975 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6976 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6977 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6979 if (!broken_loop)
6981 struct loop *loop = alloc_loop ();
6982 loop->header = l1_bb;
6983 loop->latch = cont_bb;
6984 add_loop (loop, l1_bb->loop_father);
6985 loop->safelen = INT_MAX;
6988 /* Pick the correct library function based on the precision of the
6989 induction variable type. */
6990 tree lib_fun = NULL_TREE;
6991 if (TYPE_PRECISION (type) == 32)
6992 lib_fun = cilk_for_32_fndecl;
6993 else if (TYPE_PRECISION (type) == 64)
6994 lib_fun = cilk_for_64_fndecl;
6995 else
6996 gcc_unreachable ();
6998 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
7000 /* WS_ARGS contains the library function flavor to call
7001 (__libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32) and the
7002 user-defined grain value. If the user does not define one, then zero
7003 is passed in by the parser. */
7004 vec_alloc (region->ws_args, 2);
7005 region->ws_args->quick_push (lib_fun);
7006 region->ws_args->quick_push (fd->chunk_size);
7009 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
7010 loop. Given parameters:
7012 for (V = N1; V cond N2; V += STEP) BODY;
7014 where COND is "<" or ">", we generate pseudocode
7016 V = N1;
7017 goto L1;
7018 L0:
7019 BODY;
7020 V += STEP;
7021 L1:
7022 if (V cond N2) goto L0; else goto L2;
7023 L2:
7025 For collapsed loops, given parameters:
7026 collapse(3)
7027 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
7028 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
7029 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
7030 BODY;
7032 we generate pseudocode
7034 if (cond3 is <)
7035 adj = STEP3 - 1;
7036 else
7037 adj = STEP3 + 1;
7038 count3 = (adj + N32 - N31) / STEP3;
7039 if (cond2 is <)
7040 adj = STEP2 - 1;
7041 else
7042 adj = STEP2 + 1;
7043 count2 = (adj + N22 - N21) / STEP2;
7044 if (cond1 is <)
7045 adj = STEP1 - 1;
7046 else
7047 adj = STEP1 + 1;
7048 count1 = (adj + N12 - N11) / STEP1;
7049 count = count1 * count2 * count3;
7050 V = 0;
7051 V1 = N11;
7052 V2 = N21;
7053 V3 = N31;
7054 goto L1;
7055 L0:
7056 BODY;
7057 V += 1;
7058 V3 += STEP3;
7059 V2 += (V3 cond3 N32) ? 0 : STEP2;
7060 V3 = (V3 cond3 N32) ? V3 : N31;
7061 V1 += (V2 cond2 N22) ? 0 : STEP1;
7062 V2 = (V2 cond2 N22) ? V2 : N21;
7063 L1:
7064 if (V < count) goto L0; else goto L2;
7065 L2:
7066 */
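/* A minimal sketch of the collapsed form above for collapse(2), with
   "<" conditions (hypothetical names, not GCC code): a single linear
   counter V walks count1 * count2 iterations while V1/V2 are kept in
   step with it.  */

static void
simd_collapse2_sketch (long n11, long n12, long step1,
                       long n21, long n22, long step2)
{
  long count1 = (step1 - 1 + n12 - n11) / step1;
  long count2 = (step2 - 1 + n22 - n21) / step2;
  long count = count1 * count2;
  long v = 0, v1 = n11, v2 = n21;
  while (v < count)
    {
      /* BODY uses v1 and v2 here.  */
      v += 1;
      v2 += step2;
      v1 += (v2 < n22) ? 0 : step1;  /* advance outer loop on wrap */
      v2 = (v2 < n22) ? v2 : n21;    /* reset inner loop on wrap */
    }
}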
7069 static void
7070 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
7072 tree type, t;
7073 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7074 gimple_stmt_iterator gsi;
7075 gimple stmt;
7076 gimple_cond cond_stmt;
7077 bool broken_loop = region->cont == NULL;
7078 edge e, ne;
7079 tree *counts = NULL;
7080 int i;
7081 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7082 OMP_CLAUSE_SAFELEN);
7083 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7084 OMP_CLAUSE__SIMDUID_);
7085 tree n1, n2;
7087 type = TREE_TYPE (fd->loop.v);
7088 entry_bb = region->entry;
7089 cont_bb = region->cont;
7090 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7091 gcc_assert (broken_loop
7092 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7093 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7094 if (!broken_loop)
7096 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7097 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7098 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7099 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7101 else
7103 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7104 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7105 l2_bb = single_succ (l1_bb);
7107 exit_bb = region->exit;
7108 l2_dom_bb = NULL;
7110 gsi = gsi_last_bb (entry_bb);
7112 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7113 /* Not needed in SSA form right now. */
7114 gcc_assert (!gimple_in_ssa_p (cfun));
7115 if (fd->collapse > 1)
7117 int first_zero_iter = -1;
7118 basic_block zero_iter_bb = l2_bb;
7120 counts = XALLOCAVEC (tree, fd->collapse);
7121 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7122 zero_iter_bb, first_zero_iter,
7123 l2_dom_bb);
7125 if (l2_dom_bb == NULL)
7126 l2_dom_bb = l1_bb;
7128 n1 = fd->loop.n1;
7129 n2 = fd->loop.n2;
7130 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7132 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7133 OMP_CLAUSE__LOOPTEMP_);
7134 gcc_assert (innerc);
7135 n1 = OMP_CLAUSE_DECL (innerc);
7136 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7137 OMP_CLAUSE__LOOPTEMP_);
7138 gcc_assert (innerc);
7139 n2 = OMP_CLAUSE_DECL (innerc);
7140 expand_omp_build_assign (&gsi, fd->loop.v,
7141 fold_convert (type, n1));
7142 if (fd->collapse > 1)
7144 gsi_prev (&gsi);
7145 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7146 gsi_next (&gsi);
7149 else
7151 expand_omp_build_assign (&gsi, fd->loop.v,
7152 fold_convert (type, fd->loop.n1));
7153 if (fd->collapse > 1)
7154 for (i = 0; i < fd->collapse; i++)
7156 tree itype = TREE_TYPE (fd->loops[i].v);
7157 if (POINTER_TYPE_P (itype))
7158 itype = signed_type_for (itype);
7159 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7160 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7164 /* Remove the GIMPLE_OMP_FOR statement. */
7165 gsi_remove (&gsi, true);
7167 if (!broken_loop)
7169 /* Code to control the increment goes in the CONT_BB. */
7170 gsi = gsi_last_bb (cont_bb);
7171 stmt = gsi_stmt (gsi);
7172 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7174 if (POINTER_TYPE_P (type))
7175 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7176 else
7177 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7178 expand_omp_build_assign (&gsi, fd->loop.v, t);
7180 if (fd->collapse > 1)
7182 i = fd->collapse - 1;
7183 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7185 t = fold_convert (sizetype, fd->loops[i].step);
7186 t = fold_build_pointer_plus (fd->loops[i].v, t);
7188 else
7190 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7191 fd->loops[i].step);
7192 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7193 fd->loops[i].v, t);
7195 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7197 for (i = fd->collapse - 1; i > 0; i--)
7199 tree itype = TREE_TYPE (fd->loops[i].v);
7200 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7201 if (POINTER_TYPE_P (itype2))
7202 itype2 = signed_type_for (itype2);
7203 t = build3 (COND_EXPR, itype2,
7204 build2 (fd->loops[i].cond_code, boolean_type_node,
7205 fd->loops[i].v,
7206 fold_convert (itype, fd->loops[i].n2)),
7207 build_int_cst (itype2, 0),
7208 fold_convert (itype2, fd->loops[i - 1].step));
7209 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7210 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7211 else
7212 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7213 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7215 t = build3 (COND_EXPR, itype,
7216 build2 (fd->loops[i].cond_code, boolean_type_node,
7217 fd->loops[i].v,
7218 fold_convert (itype, fd->loops[i].n2)),
7219 fd->loops[i].v,
7220 fold_convert (itype, fd->loops[i].n1));
7221 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7225 /* Remove GIMPLE_OMP_CONTINUE. */
7226 gsi_remove (&gsi, true);
7229 /* Emit the condition in L1_BB. */
7230 gsi = gsi_start_bb (l1_bb);
7232 t = fold_convert (type, n2);
7233 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7234 false, GSI_CONTINUE_LINKING);
7235 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7236 cond_stmt = gimple_build_cond_empty (t);
7237 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
7238 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
7239 NULL, NULL)
7240 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
7241 NULL, NULL))
7243 gsi = gsi_for_stmt (cond_stmt);
7244 gimple_regimplify_operands (cond_stmt, &gsi);
7247 /* Remove GIMPLE_OMP_RETURN. */
7248 gsi = gsi_last_bb (exit_bb);
7249 gsi_remove (&gsi, true);
7251 /* Connect the new blocks. */
7252 remove_edge (FALLTHRU_EDGE (entry_bb));
7254 if (!broken_loop)
7256 remove_edge (BRANCH_EDGE (entry_bb));
7257 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7259 e = BRANCH_EDGE (l1_bb);
7260 ne = FALLTHRU_EDGE (l1_bb);
7261 e->flags = EDGE_TRUE_VALUE;
7263 else
7265 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7267 ne = single_succ_edge (l1_bb);
7268 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7271 ne->flags = EDGE_FALSE_VALUE;
7272 e->probability = REG_BR_PROB_BASE * 7 / 8;
7273 ne->probability = REG_BR_PROB_BASE / 8;
7275 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7276 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7277 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7279 if (!broken_loop)
7281 struct loop *loop = alloc_loop ();
7282 loop->header = l1_bb;
7283 loop->latch = cont_bb;
7284 add_loop (loop, l1_bb->loop_father);
7285 if (safelen == NULL_TREE)
7286 loop->safelen = INT_MAX;
7287 else
7289 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7290 if (TREE_CODE (safelen) != INTEGER_CST)
7291 loop->safelen = 0;
7292 else if (!tree_fits_uhwi_p (safelen)
7293 || tree_to_uhwi (safelen) > INT_MAX)
7294 loop->safelen = INT_MAX;
7295 else
7296 loop->safelen = tree_to_uhwi (safelen);
7297 if (loop->safelen == 1)
7298 loop->safelen = 0;
7300 if (simduid)
7302 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7303 cfun->has_simduid_loops = true;
7305 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7306 the loop. */
7307 if ((flag_tree_loop_vectorize
7308 || (!global_options_set.x_flag_tree_loop_vectorize
7309 && !global_options_set.x_flag_tree_vectorize))
7310 && flag_tree_loop_optimize
7311 && loop->safelen > 1)
7313 loop->force_vectorize = true;
7314 cfun->has_force_vectorize_loops = true;
7320 /* Expand the OpenMP loop defined by REGION. */
7322 static void
7323 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7325 struct omp_for_data fd;
7326 struct omp_for_data_loop *loops;
7328 loops
7329 = (struct omp_for_data_loop *)
7330 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7331 * sizeof (struct omp_for_data_loop));
7332 extract_omp_for_data (as_a <gimple_omp_for> (last_stmt (region->entry)),
7333 &fd, loops);
7334 region->sched_kind = fd.sched_kind;
7336 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7337 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7338 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7339 if (region->cont)
7341 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7342 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7343 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7345 else
7346 /* If there isn't a continue then this is a degenerate case where
7347 the introduction of abnormal edges during lowering will prevent
7348 original loops from being detected. Fix that up. */
7349 loops_state_set (LOOPS_NEED_FIXUP);
7351 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7352 expand_omp_simd (region, &fd);
7353 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7354 expand_cilk_for (region, &fd);
7355 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7356 && !fd.have_ordered)
7358 if (fd.chunk_size == NULL)
7359 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7360 else
7361 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7363 else
7365 int fn_index, start_ix, next_ix;
7367 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7368 == GF_OMP_FOR_KIND_FOR);
7369 if (fd.chunk_size == NULL
7370 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7371 fd.chunk_size = integer_zero_node;
7372 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
7373 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7374 ? 3 : fd.sched_kind;
7375 fn_index += fd.have_ordered * 4;
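/* A sketch of the resulting index, assuming the schedule-kind enum
   and the GOMP_loop_* builtins keep their declaration order (static,
   dynamic, guided, runtime, then the ordered variants): fn_index
   0..3 selects GOMP_loop_{static,dynamic,guided,runtime}_start/next,
   and the "+ 4" above moves to the GOMP_loop_ordered_* entry
   points.  */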
7376 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7377 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
7378 if (fd.iter_type == long_long_unsigned_type_node)
7380 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7381 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7382 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7383 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7385 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7386 (enum built_in_function) next_ix, inner_stmt);
7389 if (gimple_in_ssa_p (cfun))
7390 update_ssa (TODO_update_ssa_only_virtuals);
7394 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7396 v = GOMP_sections_start (n);
7397 L0:
7398 switch (v)
7399 {
7400 case 0:
7401 goto L2;
7402 case 1:
7403 section 1;
7404 goto L1;
7405 case 2:
7406 ...
7407 case n:
7408 ...
7409 default:
7410 abort ();
7411 }
7412 L1:
7413 v = GOMP_sections_next ();
7414 goto L0;
7415 L2:
7416 reduction;
7418 If this is a combined parallel sections, replace the call to
7419 GOMP_sections_start with a call to GOMP_sections_next. */
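/* A minimal, self-contained sketch of the generated dispatch
   (hypothetical wrapper; GOMP_sections_start/next are the libgomp
   entry points named above):  */

extern unsigned GOMP_sections_start (unsigned count);
extern unsigned GOMP_sections_next (void);
extern void abort (void);

static void
sections_dispatch_sketch (unsigned n)
{
  unsigned v = GOMP_sections_start (n);  /* _next if combined */
  for (;;)
    {
      switch (v)
        {
        case 0:
          return;        /* L2: no more sections to run */
        case 1:
          /* section 1 body */
          break;
        default:
          abort ();
        }
      v = GOMP_sections_next ();         /* L1 */
    }
}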
7421 static void
7422 expand_omp_sections (struct omp_region *region)
7424 tree t, u, vin = NULL, vmain, vnext, l2;
7425 unsigned len;
7426 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7427 gimple_stmt_iterator si, switch_si;
7428 gimple_omp_sections sections_stmt;
7429 gimple stmt;
7430 gimple_omp_continue cont;
7431 edge_iterator ei;
7432 edge e;
7433 struct omp_region *inner;
7434 unsigned i, casei;
7435 bool exit_reachable = region->cont != NULL;
7437 gcc_assert (region->exit != NULL);
7438 entry_bb = region->entry;
7439 l0_bb = single_succ (entry_bb);
7440 l1_bb = region->cont;
7441 l2_bb = region->exit;
7442 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7443 l2 = gimple_block_label (l2_bb);
7444 else
7446 /* This can happen if there are reductions. */
7447 len = EDGE_COUNT (l0_bb->succs);
7448 gcc_assert (len > 0);
7449 e = EDGE_SUCC (l0_bb, len - 1);
7450 si = gsi_last_bb (e->dest);
7451 l2 = NULL_TREE;
7452 if (gsi_end_p (si)
7453 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7454 l2 = gimple_block_label (e->dest);
7455 else
7456 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7458 si = gsi_last_bb (e->dest);
7459 if (gsi_end_p (si)
7460 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7462 l2 = gimple_block_label (e->dest);
7463 break;
7467 if (exit_reachable)
7468 default_bb = create_empty_bb (l1_bb->prev_bb);
7469 else
7470 default_bb = create_empty_bb (l0_bb);
7472 /* We will build a switch() with enough cases for all the
7473 GIMPLE_OMP_SECTION regions, a '0' case to handle running out of work
7474 and a default case to abort if something goes wrong. */
7475 len = EDGE_COUNT (l0_bb->succs);
7477 /* Use vec::quick_push on label_vec throughout, since we know the size
7478 in advance. */
7479 auto_vec<tree> label_vec (len);
7481 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7482 GIMPLE_OMP_SECTIONS statement. */
7483 si = gsi_last_bb (entry_bb);
7484 sections_stmt = as_a <gimple_omp_sections> (gsi_stmt (si));
7485 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7486 vin = gimple_omp_sections_control (sections_stmt);
7487 if (!is_combined_parallel (region))
7489 /* If we are not inside a combined parallel+sections region,
7490 call GOMP_sections_start. */
7491 t = build_int_cst (unsigned_type_node, len - 1);
7492 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7493 stmt = gimple_build_call (u, 1, t);
7495 else
7497 /* Otherwise, call GOMP_sections_next. */
7498 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7499 stmt = gimple_build_call (u, 0);
7501 gimple_call_set_lhs (stmt, vin);
7502 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7503 gsi_remove (&si, true);
7505 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7506 L0_BB. */
7507 switch_si = gsi_last_bb (l0_bb);
7508 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7509 if (exit_reachable)
7511 cont = as_a <gimple_omp_continue> (last_stmt (l1_bb));
7512 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7513 vmain = gimple_omp_continue_control_use (cont);
7514 vnext = gimple_omp_continue_control_def (cont);
7516 else
7518 vmain = vin;
7519 vnext = NULL_TREE;
7522 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7523 label_vec.quick_push (t);
7524 i = 1;
7526 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7527 for (inner = region->inner, casei = 1;
7528 inner;
7529 inner = inner->next, i++, casei++)
7531 basic_block s_entry_bb, s_exit_bb;
7533 /* Skip optional reduction region. */
7534 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7536 --i;
7537 --casei;
7538 continue;
7541 s_entry_bb = inner->entry;
7542 s_exit_bb = inner->exit;
7544 t = gimple_block_label (s_entry_bb);
7545 u = build_int_cst (unsigned_type_node, casei);
7546 u = build_case_label (u, NULL, t);
7547 label_vec.quick_push (u);
7549 si = gsi_last_bb (s_entry_bb);
7550 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7551 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7552 gsi_remove (&si, true);
7553 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7555 if (s_exit_bb == NULL)
7556 continue;
7558 si = gsi_last_bb (s_exit_bb);
7559 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7560 gsi_remove (&si, true);
7562 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7565 /* Error handling code goes in DEFAULT_BB. */
7566 t = gimple_block_label (default_bb);
7567 u = build_case_label (NULL, NULL, t);
7568 make_edge (l0_bb, default_bb, 0);
7569 add_bb_to_loop (default_bb, current_loops->tree_root);
7571 stmt = gimple_build_switch (vmain, u, label_vec);
7572 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7573 gsi_remove (&switch_si, true);
7575 si = gsi_start_bb (default_bb);
7576 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7577 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7579 if (exit_reachable)
7581 tree bfn_decl;
7583 /* Code to get the next section goes in L1_BB. */
7584 si = gsi_last_bb (l1_bb);
7585 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7587 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7588 stmt = gimple_build_call (bfn_decl, 0);
7589 gimple_call_set_lhs (stmt, vnext);
7590 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7591 gsi_remove (&si, true);
7593 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7596 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7597 si = gsi_last_bb (l2_bb);
7598 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7599 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7600 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7601 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7602 else
7603 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7604 stmt = gimple_build_call (t, 0);
7605 if (gimple_omp_return_lhs (gsi_stmt (si)))
7606 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7607 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7608 gsi_remove (&si, true);
7610 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7614 /* Expand code for an OpenMP single directive. We've already expanded
7615 much of the code, here we simply place the GOMP_barrier call. */
7617 static void
7618 expand_omp_single (struct omp_region *region)
7620 basic_block entry_bb, exit_bb;
7621 gimple_stmt_iterator si;
7623 entry_bb = region->entry;
7624 exit_bb = region->exit;
7626 si = gsi_last_bb (entry_bb);
7627 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7628 gsi_remove (&si, true);
7629 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7631 si = gsi_last_bb (exit_bb);
7632 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7634 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7635 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7637 gsi_remove (&si, true);
7638 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7642 /* Generic expansion for OpenMP synchronization directives: master,
7643 taskgroup, ordered, critical and teams. All we need to do here is
7644 remove the entry and exit markers for REGION. */
7646 static void
7647 expand_omp_synch (struct omp_region *region)
7649 basic_block entry_bb, exit_bb;
7650 gimple_stmt_iterator si;
7652 entry_bb = region->entry;
7653 exit_bb = region->exit;
7655 si = gsi_last_bb (entry_bb);
7656 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7657 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7658 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7659 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7660 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7661 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7662 gsi_remove (&si, true);
7663 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7665 if (exit_bb)
7667 si = gsi_last_bb (exit_bb);
7668 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7669 gsi_remove (&si, true);
7670 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7674 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7675 operation as a normal volatile load. */
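/* E.g. (a sketch): for a 4-byte int X, INDEX is 2 and
   BUILT_IN_ATOMIC_LOAD_N + INDEX + 1 names __atomic_load_4, so

       #pragma omp atomic read
       v = x;

   becomes roughly

       v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   with a VIEW_CONVERT_EXPR wrapped around the call when X is, say, a
   float of the same size.  */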
7677 static bool
7678 expand_omp_atomic_load (basic_block load_bb, tree addr,
7679 tree loaded_val, int index)
7681 enum built_in_function tmpbase;
7682 gimple_stmt_iterator gsi;
7683 basic_block store_bb;
7684 location_t loc;
7685 gimple stmt;
7686 tree decl, call, type, itype;
7688 gsi = gsi_last_bb (load_bb);
7689 stmt = gsi_stmt (gsi);
7690 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7691 loc = gimple_location (stmt);
7693 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7694 is smaller than word size, then expand_atomic_load assumes that the load
7695 is atomic. We could avoid the builtin entirely in this case. */
7697 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7698 decl = builtin_decl_explicit (tmpbase);
7699 if (decl == NULL_TREE)
7700 return false;
7702 type = TREE_TYPE (loaded_val);
7703 itype = TREE_TYPE (TREE_TYPE (decl));
7705 call = build_call_expr_loc (loc, decl, 2, addr,
7706 build_int_cst (NULL,
7707 gimple_omp_atomic_seq_cst_p (stmt)
7708 ? MEMMODEL_SEQ_CST
7709 : MEMMODEL_RELAXED));
7710 if (!useless_type_conversion_p (type, itype))
7711 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7712 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7714 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7715 gsi_remove (&gsi, true);
7717 store_bb = single_succ (load_bb);
7718 gsi = gsi_last_bb (store_bb);
7719 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7720 gsi_remove (&gsi, true);
7722 if (gimple_in_ssa_p (cfun))
7723 update_ssa (TODO_update_ssa_no_phi);
7725 return true;
7728 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7729 operation as a normal volatile store. */
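/* A sketch of both shapes: an atomic write

       #pragma omp atomic write
       x = expr;

   becomes roughly __atomic_store_4 (&x, expr, MEMMODEL_RELAXED) for a
   4-byte X, while a capture that needs the previous value, e.g.

       #pragma omp atomic capture
       { v = x; x = expr; }

   becomes v = __atomic_exchange_4 (&x, expr, MEMMODEL_RELAXED).  */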
7731 static bool
7732 expand_omp_atomic_store (basic_block load_bb, tree addr,
7733 tree loaded_val, tree stored_val, int index)
7735 enum built_in_function tmpbase;
7736 gimple_stmt_iterator gsi;
7737 basic_block store_bb = single_succ (load_bb);
7738 location_t loc;
7739 gimple stmt;
7740 tree decl, call, type, itype;
7741 enum machine_mode imode;
7742 bool exchange;
7744 gsi = gsi_last_bb (load_bb);
7745 stmt = gsi_stmt (gsi);
7746 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7748 /* If the load value is needed, then this isn't a store but an exchange. */
7749 exchange = gimple_omp_atomic_need_value_p (stmt);
7751 gsi = gsi_last_bb (store_bb);
7752 stmt = gsi_stmt (gsi);
7753 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7754 loc = gimple_location (stmt);
7756 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7757 is smaller than word size, then expand_atomic_store assumes that the store
7758 is atomic. We could avoid the builtin entirely in this case. */
7760 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7761 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7762 decl = builtin_decl_explicit (tmpbase);
7763 if (decl == NULL_TREE)
7764 return false;
7766 type = TREE_TYPE (stored_val);
7768 /* Dig out the type of the function's second argument. */
7769 itype = TREE_TYPE (decl);
7770 itype = TYPE_ARG_TYPES (itype);
7771 itype = TREE_CHAIN (itype);
7772 itype = TREE_VALUE (itype);
7773 imode = TYPE_MODE (itype);
7775 if (exchange && !can_atomic_exchange_p (imode, true))
7776 return false;
7778 if (!useless_type_conversion_p (itype, type))
7779 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7780 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7781 build_int_cst (NULL,
7782 gimple_omp_atomic_seq_cst_p (stmt)
7783 ? MEMMODEL_SEQ_CST
7784 : MEMMODEL_RELAXED));
7785 if (exchange)
7787 if (!useless_type_conversion_p (type, itype))
7788 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7789 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7792 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7793 gsi_remove (&gsi, true);
7795 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7796 gsi = gsi_last_bb (load_bb);
7797 gsi_remove (&gsi, true);
7799 if (gimple_in_ssa_p (cfun))
7800 update_ssa (TODO_update_ssa_no_phi);
7802 return true;
7805 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7806 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7807 size of the data type, and thus usable to find the index of the builtin
7808 decl. Returns false if the expression is not of the proper form. */
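/* For instance (a sketch), with a 4-byte int X

       #pragma omp atomic
       x += n;

   matches the PLUS_EXPR case below and becomes roughly

       __atomic_fetch_add_4 (&x, n, MEMMODEL_RELAXED);

   capturing the updated value instead selects the
   BUILT_IN_ATOMIC_ADD_FETCH_N variant, whose result is assigned to the
   captured variable.  */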
7810 static bool
7811 expand_omp_atomic_fetch_op (basic_block load_bb,
7812 tree addr, tree loaded_val,
7813 tree stored_val, int index)
7815 enum built_in_function oldbase, newbase, tmpbase;
7816 tree decl, itype, call;
7817 tree lhs, rhs;
7818 basic_block store_bb = single_succ (load_bb);
7819 gimple_stmt_iterator gsi;
7820 gimple stmt;
7821 location_t loc;
7822 enum tree_code code;
7823 bool need_old, need_new;
7824 enum machine_mode imode;
7825 bool seq_cst;
7827 /* We expect to find the following sequences:
7829 load_bb:
7830 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7832 store_bb:
7833 val = tmp OP something; (or: something OP tmp)
7834 GIMPLE_OMP_ATOMIC_STORE (val)
7836 ???FIXME: Allow a more flexible sequence.
7837 Perhaps use data flow to pick the statements.
7841 gsi = gsi_after_labels (store_bb);
7842 stmt = gsi_stmt (gsi);
7843 loc = gimple_location (stmt);
7844 if (!is_gimple_assign (stmt))
7845 return false;
7846 gsi_next (&gsi);
7847 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7848 return false;
7849 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7850 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7851 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7852 gcc_checking_assert (!need_old || !need_new);
7854 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7855 return false;
7857 /* Check for one of the supported fetch-op operations. */
7858 code = gimple_assign_rhs_code (stmt);
7859 switch (code)
7861 case PLUS_EXPR:
7862 case POINTER_PLUS_EXPR:
7863 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7864 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7865 break;
7866 case MINUS_EXPR:
7867 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7868 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7869 break;
7870 case BIT_AND_EXPR:
7871 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7872 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7873 break;
7874 case BIT_IOR_EXPR:
7875 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7876 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7877 break;
7878 case BIT_XOR_EXPR:
7879 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7880 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7881 break;
7882 default:
7883 return false;
7886 /* Make sure the expression is of the proper form. */
7887 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7888 rhs = gimple_assign_rhs2 (stmt);
7889 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7890 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7891 rhs = gimple_assign_rhs1 (stmt);
7892 else
7893 return false;
7895 tmpbase = ((enum built_in_function)
7896 ((need_new ? newbase : oldbase) + index + 1));
7897 decl = builtin_decl_explicit (tmpbase);
7898 if (decl == NULL_TREE)
7899 return false;
7900 itype = TREE_TYPE (TREE_TYPE (decl));
7901 imode = TYPE_MODE (itype);
7903 /* We could test all of the various optabs involved, but the fact of the
7904 matter is that (with the exception of i486 vs i586 and xadd) all targets
7905 that support any atomic operation optab also implement compare-and-swap.
7906 Let optabs.c take care of expanding any compare-and-swap loop. */
7907 if (!can_compare_and_swap_p (imode, true))
7908 return false;
7910 gsi = gsi_last_bb (load_bb);
7911 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7913 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7914 It only requires that the operation happen atomically. Thus we can
7915 use the RELAXED memory model. */
7916 call = build_call_expr_loc (loc, decl, 3, addr,
7917 fold_convert_loc (loc, itype, rhs),
7918 build_int_cst (NULL,
7919 seq_cst ? MEMMODEL_SEQ_CST
7920 : MEMMODEL_RELAXED));
7922 if (need_old || need_new)
7924 lhs = need_old ? loaded_val : stored_val;
7925 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7926 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7928 else
7929 call = fold_convert_loc (loc, void_type_node, call);
7930 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7931 gsi_remove (&gsi, true);
7933 gsi = gsi_last_bb (store_bb);
7934 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7935 gsi_remove (&gsi, true);
7936 gsi = gsi_last_bb (store_bb);
7937 gsi_remove (&gsi, true);
7939 if (gimple_in_ssa_p (cfun))
7940 update_ssa (TODO_update_ssa_no_phi);
7942 return true;
7945 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7947 oldval = *addr;
7948 repeat:
7949 newval = rhs; // with oldval replacing *addr in rhs
7950 expected = oldval;
7951 oldval = __sync_val_compare_and_swap (addr, expected, newval);
7952 if (oldval != expected) goto repeat;
7954 INDEX is log2 of the size of the data type, and thus usable to find the
7955 index of the builtin decl. */
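/* A sketch of when this triggers: for

       #pragma omp atomic
       d *= 2.0;

   on an 8-byte double there is no fetch-and-multiply builtin, so the
   code below view-converts D to an 8-byte integer and retries
   __sync_val_compare_and_swap_8 until no other thread has modified
   *ADDR between the load and the swap.  */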
7957 static bool
7958 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7959 tree addr, tree loaded_val, tree stored_val,
7960 int index)
7962 tree loadedi, storedi, initial, new_storedi, old_vali;
7963 tree type, itype, cmpxchg, iaddr;
7964 gimple_stmt_iterator si;
7965 basic_block loop_header = single_succ (load_bb);
7966 gimple phi, stmt;
7967 edge e;
7968 enum built_in_function fncode;
7970 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7971 order to use the RELAXED memory model effectively. */
7972 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7973 + index + 1);
7974 cmpxchg = builtin_decl_explicit (fncode);
7975 if (cmpxchg == NULL_TREE)
7976 return false;
7977 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7978 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7980 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7981 return false;
7983 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7984 si = gsi_last_bb (load_bb);
7985 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7987 /* For floating-point values, we'll need to view-convert them to integers
7988 so that we can perform the atomic compare and swap. Simplify the
7989 following code by always setting up the "i"ntegral variables. */
7990 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7992 tree iaddr_val;
7994 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7995 true), NULL);
7996 iaddr_val
7997 = force_gimple_operand_gsi (&si,
7998 fold_convert (TREE_TYPE (iaddr), addr),
7999 false, NULL_TREE, true, GSI_SAME_STMT);
8000 stmt = gimple_build_assign (iaddr, iaddr_val);
8001 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8002 loadedi = create_tmp_var (itype, NULL);
8003 if (gimple_in_ssa_p (cfun))
8004 loadedi = make_ssa_name (loadedi, NULL);
8006 else
8008 iaddr = addr;
8009 loadedi = loaded_val;
8012 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
8013 tree loaddecl = builtin_decl_explicit (fncode);
8014 if (loaddecl)
8015 initial
8016 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
8017 build_call_expr (loaddecl, 2, iaddr,
8018 build_int_cst (NULL_TREE,
8019 MEMMODEL_RELAXED)));
8020 else
8021 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
8022 build_int_cst (TREE_TYPE (iaddr), 0));
8024 initial
8025 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
8026 GSI_SAME_STMT);
8028 /* Move the value to the LOADEDI temporary. */
8029 if (gimple_in_ssa_p (cfun))
8031 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
8032 phi = create_phi_node (loadedi, loop_header);
8033 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
8034 initial);
8036 else
8037 gsi_insert_before (&si,
8038 gimple_build_assign (loadedi, initial),
8039 GSI_SAME_STMT);
8040 if (loadedi != loaded_val)
8042 gimple_stmt_iterator gsi2;
8043 tree x;
8045 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
8046 gsi2 = gsi_start_bb (loop_header);
8047 if (gimple_in_ssa_p (cfun))
8049 gimple_assign stmt;
8050 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8051 true, GSI_SAME_STMT);
8052 stmt = gimple_build_assign (loaded_val, x);
8053 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
8055 else
8057 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
8058 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8059 true, GSI_SAME_STMT);
8062 gsi_remove (&si, true);
8064 si = gsi_last_bb (store_bb);
8065 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8067 if (iaddr == addr)
8068 storedi = stored_val;
8069 else
8070 storedi =
8071 force_gimple_operand_gsi (&si,
8072 build1 (VIEW_CONVERT_EXPR, itype,
8073 stored_val), true, NULL_TREE, true,
8074 GSI_SAME_STMT);
8076 /* Build the compare&swap statement. */
8077 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
8078 new_storedi = force_gimple_operand_gsi (&si,
8079 fold_convert (TREE_TYPE (loadedi),
8080 new_storedi),
8081 true, NULL_TREE,
8082 true, GSI_SAME_STMT);
8084 if (gimple_in_ssa_p (cfun))
8085 old_vali = loadedi;
8086 else
8088 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
8089 stmt = gimple_build_assign (old_vali, loadedi);
8090 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8092 stmt = gimple_build_assign (loadedi, new_storedi);
8093 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8096 /* Note that we always perform the comparison as an integer, even for
8097 floating point. This allows the atomic operation to properly
8098 succeed even with NaNs and -0.0. */
8099 stmt = gimple_build_cond_empty
8100 (build2 (NE_EXPR, boolean_type_node,
8101 new_storedi, old_vali));
8102 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8104 /* Update cfg. */
8105 e = single_succ_edge (store_bb);
8106 e->flags &= ~EDGE_FALLTHRU;
8107 e->flags |= EDGE_FALSE_VALUE;
8109 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
8111 /* Copy the new value to loadedi (we already did that before the condition
8112 if we are not in SSA). */
8113 if (gimple_in_ssa_p (cfun))
8115 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
8116 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
8119 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8120 gsi_remove (&si, true);
8122 struct loop *loop = alloc_loop ();
8123 loop->header = loop_header;
8124 loop->latch = store_bb;
8125 add_loop (loop, loop_header->loop_father);
8127 if (gimple_in_ssa_p (cfun))
8128 update_ssa (TODO_update_ssa_no_phi);
8130 return true;
8133 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8135 GOMP_atomic_start ();
8136 *addr = rhs;
8137 GOMP_atomic_end ();
8139 The result is not globally atomic, but works so long as all parallel
8140 references are within #pragma omp atomic directives. According to
8141 responses received from omp@openmp.org, this appears to be within
8142 spec, which makes sense, since that's how several other compilers
8143 handle this situation as well.
8144 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8145 expanding. STORED_VAL is the operand of the matching
8146 GIMPLE_OMP_ATOMIC_STORE.
8148 We replace
8149 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8150 loaded_val = *addr;
8152 and replace
8153 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8154 *addr = stored_val;
8157 static bool
8158 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
8159 tree addr, tree loaded_val, tree stored_val)
8161 gimple_stmt_iterator si;
8162 gimple_assign stmt;
8163 tree t;
8165 si = gsi_last_bb (load_bb);
8166 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8168 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
8169 t = build_call_expr (t, 0);
8170 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8172 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
8173 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8174 gsi_remove (&si, true);
8176 si = gsi_last_bb (store_bb);
8177 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8179 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
8180 stored_val);
8181 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8183 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
8184 t = build_call_expr (t, 0);
8185 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8186 gsi_remove (&si, true);
8188 if (gimple_in_ssa_p (cfun))
8189 update_ssa (TODO_update_ssa_no_phi);
8190 return true;
8193 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
8194 using expand_omp_atomic_fetch_op. If that fails, we try
8195 expand_omp_atomic_pipeline, and if that fails too, the
8196 ultimate fallback is wrapping the operation in a mutex
8197 (expand_omp_atomic_mutex). REGION is the atomic region built
8198 by build_omp_regions_1(). */
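/* As an illustration: INDEX is exact_log2 of the type's size in bytes,
   so a char gives 0, a 4-byte int gives 2 and a 16-byte type gives 4;
   adding INDEX + 1 to a _N builtin code is what steps to the matching
   _1 .. _16 variant in the helpers above.  */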
8200 static void
8201 expand_omp_atomic (struct omp_region *region)
8203 basic_block load_bb = region->entry, store_bb = region->exit;
8204 gimple_omp_atomic_load load =
8205 as_a <gimple_omp_atomic_load> (last_stmt (load_bb));
8206 gimple_omp_atomic_store store =
8207 as_a <gimple_omp_atomic_store> (last_stmt (store_bb));
8208 tree loaded_val = gimple_omp_atomic_load_lhs (load);
8209 tree addr = gimple_omp_atomic_load_rhs (load);
8210 tree stored_val = gimple_omp_atomic_store_val (store);
8211 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8212 HOST_WIDE_INT index;
8214 /* Make sure the type is one of the supported sizes. */
8215 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
8216 index = exact_log2 (index);
8217 if (index >= 0 && index <= 4)
8219 unsigned int align = TYPE_ALIGN_UNIT (type);
8221 /* __sync builtins require strict data alignment. */
8222 if (exact_log2 (align) >= index)
8224 /* Atomic load. */
8225 if (loaded_val == stored_val
8226 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8227 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8228 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8229 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
8230 return;
8232 /* Atomic store. */
8233 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8234 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8235 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8236 && store_bb == single_succ (load_bb)
8237 && first_stmt (store_bb) == store
8238 && expand_omp_atomic_store (load_bb, addr, loaded_val,
8239 stored_val, index))
8240 return;
8242 /* When possible, use specialized atomic update functions. */
8243 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
8244 && store_bb == single_succ (load_bb)
8245 && expand_omp_atomic_fetch_op (load_bb, addr,
8246 loaded_val, stored_val, index))
8247 return;
8249 /* If we don't have specialized __sync builtins, try to implement
8250 it as a compare and swap loop. */
8251 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
8252 loaded_val, stored_val, index))
8253 return;
8257 /* The ultimate fallback is wrapping the operation in a mutex. */
8258 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
8262 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
8264 static void
8265 expand_omp_target (struct omp_region *region)
8267 basic_block entry_bb, exit_bb, new_bb;
8268 struct function *child_cfun = NULL;
8269 tree child_fn = NULL_TREE, block, t;
8270 gimple_stmt_iterator gsi;
8271 gimple_omp_target entry_stmt;
8272 gimple stmt;
8273 edge e;
8275 entry_stmt = as_a <gimple_omp_target> (last_stmt (region->entry));
8276 new_bb = region->entry;
8277 int kind = gimple_omp_target_kind (entry_stmt);
8278 if (kind == GF_OMP_TARGET_KIND_REGION)
8280 child_fn = gimple_omp_target_child_fn (entry_stmt);
8281 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
8284 entry_bb = region->entry;
8285 exit_bb = region->exit;
8287 if (kind == GF_OMP_TARGET_KIND_REGION)
8289 unsigned srcidx, dstidx, num;
8291 /* If the target region needs data sent from the parent
8292 function, then the very first statement (except possible
8293 tree profile counter updates) of the parallel body
8294 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8295 &.OMP_DATA_O is passed as an argument to the child function,
8296 we need to replace it with the argument as seen by the child
8297 function.
8299 In most cases, this will end up being the identity assignment
8300 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
8301 a function call that has been inlined, the original PARM_DECL
8302 .OMP_DATA_I may have been converted into a different local
8303 variable. In which case, we need to keep the assignment. */
8304 if (gimple_omp_target_data_arg (entry_stmt))
8306 basic_block entry_succ_bb = single_succ (entry_bb);
8307 gimple_stmt_iterator gsi;
8308 tree arg;
8309 gimple tgtcopy_stmt = NULL;
8310 tree sender
8311 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
8313 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
8315 gcc_assert (!gsi_end_p (gsi));
8316 stmt = gsi_stmt (gsi);
8317 if (gimple_code (stmt) != GIMPLE_ASSIGN)
8318 continue;
8320 if (gimple_num_ops (stmt) == 2)
8322 tree arg = gimple_assign_rhs1 (stmt);
8324 /* We're ignoring the subcode because we're
8325 effectively doing a STRIP_NOPS. */
8327 if (TREE_CODE (arg) == ADDR_EXPR
8328 && TREE_OPERAND (arg, 0) == sender)
8330 tgtcopy_stmt = stmt;
8331 break;
8336 gcc_assert (tgtcopy_stmt != NULL);
8337 arg = DECL_ARGUMENTS (child_fn);
8339 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
8340 gsi_remove (&gsi, true);
8343 /* Declare local variables needed in CHILD_CFUN. */
8344 block = DECL_INITIAL (child_fn);
8345 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
8346 /* The gimplifier could record temporaries in target block
8347 rather than in containing function's local_decls chain,
8348 which would mean cgraph missed finalizing them. Do it now. */
8349 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
8350 if (TREE_CODE (t) == VAR_DECL
8351 && TREE_STATIC (t)
8352 && !DECL_EXTERNAL (t))
8353 varpool_node::finalize_decl (t);
8354 DECL_SAVED_TREE (child_fn) = NULL;
8355 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8356 gimple_set_body (child_fn, NULL);
8357 TREE_USED (block) = 1;
8359 /* Reset DECL_CONTEXT on function arguments. */
8360 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
8361 DECL_CONTEXT (t) = child_fn;
8363 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
8364 so that it can be moved to the child function. */
8365 gsi = gsi_last_bb (entry_bb);
8366 stmt = gsi_stmt (gsi);
8367 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
8368 && gimple_omp_target_kind (stmt)
8369 == GF_OMP_TARGET_KIND_REGION);
8370 gsi_remove (&gsi, true);
8371 e = split_block (entry_bb, stmt);
8372 entry_bb = e->dest;
8373 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8375 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8376 if (exit_bb)
8378 gsi = gsi_last_bb (exit_bb);
8379 gcc_assert (!gsi_end_p (gsi)
8380 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
8381 stmt = gimple_build_return (NULL);
8382 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8383 gsi_remove (&gsi, true);
8386 /* Move the target region into CHILD_CFUN. */
8388 block = gimple_block (entry_stmt);
8390 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8391 if (exit_bb)
8392 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8393 /* When the OMP expansion process cannot guarantee an up-to-date
8394 loop tree, arrange for the child function to fix up loops. */
8395 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8396 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8398 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8399 num = vec_safe_length (child_cfun->local_decls);
8400 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8402 t = (*child_cfun->local_decls)[srcidx];
8403 if (DECL_CONTEXT (t) == cfun->decl)
8404 continue;
8405 if (srcidx != dstidx)
8406 (*child_cfun->local_decls)[dstidx] = t;
8407 dstidx++;
8409 if (dstidx != num)
8410 vec_safe_truncate (child_cfun->local_decls, dstidx);
8412 /* Inform the callgraph about the new function. */
8413 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8414 cgraph_node::add_new_function (child_fn, true);
8416 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8417 fixed in a following pass. */
8418 push_cfun (child_cfun);
8419 cgraph_edge::rebuild_edges ();
8421 /* Some EH regions might become dead, see PR34608. If
8422 pass_cleanup_cfg isn't the first pass to happen with the
8423 new child, these dead EH edges might cause problems.
8424 Clean them up now. */
8425 if (flag_exceptions)
8427 basic_block bb;
8428 bool changed = false;
8430 FOR_EACH_BB_FN (bb, cfun)
8431 changed |= gimple_purge_dead_eh_edges (bb);
8432 if (changed)
8433 cleanup_tree_cfg ();
8435 pop_cfun ();
8438 /* Emit a library call to launch the target region, or do data
8439 transfers. */
8440 tree t1, t2, t3, t4, device, cond, c, clauses;
8441 enum built_in_function start_ix;
8442 location_t clause_loc;
8444 clauses = gimple_omp_target_clauses (entry_stmt);
8446 if (kind == GF_OMP_TARGET_KIND_REGION)
8447 start_ix = BUILT_IN_GOMP_TARGET;
8448 else if (kind == GF_OMP_TARGET_KIND_DATA)
8449 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8450 else
8451 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8453 /* By default, the value of DEVICE is -1 (let the runtime library choose)
8454 and there is no conditional. */
8455 cond = NULL_TREE;
8456 device = build_int_cst (integer_type_node, -1);
8458 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8459 if (c)
8460 cond = OMP_CLAUSE_IF_EXPR (c);
8462 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8463 if (c)
8465 device = OMP_CLAUSE_DEVICE_ID (c);
8466 clause_loc = OMP_CLAUSE_LOCATION (c);
8468 else
8469 clause_loc = gimple_location (entry_stmt);
8471 /* Ensure 'device' is of the correct type. */
8472 device = fold_convert_loc (clause_loc, integer_type_node, device);
8474 /* If we found the clause 'if (cond)', build
8475 (cond ? device : -2). */
8476 if (cond)
8478 cond = gimple_boolify (cond);
8480 basic_block cond_bb, then_bb, else_bb;
8481 edge e;
8482 tree tmp_var;
8484 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
8485 if (kind != GF_OMP_TARGET_KIND_REGION)
8487 gsi = gsi_last_bb (new_bb);
8488 gsi_prev (&gsi);
8489 e = split_block (new_bb, gsi_stmt (gsi));
8491 else
8492 e = split_block (new_bb, NULL);
8493 cond_bb = e->src;
8494 new_bb = e->dest;
8495 remove_edge (e);
8497 then_bb = create_empty_bb (cond_bb);
8498 else_bb = create_empty_bb (then_bb);
8499 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8500 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8502 stmt = gimple_build_cond_empty (cond);
8503 gsi = gsi_last_bb (cond_bb);
8504 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8506 gsi = gsi_start_bb (then_bb);
8507 stmt = gimple_build_assign (tmp_var, device);
8508 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8510 gsi = gsi_start_bb (else_bb);
8511 stmt = gimple_build_assign (tmp_var,
8512 build_int_cst (integer_type_node, -2));
8513 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8515 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8516 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8517 add_bb_to_loop (then_bb, cond_bb->loop_father);
8518 add_bb_to_loop (else_bb, cond_bb->loop_father);
8519 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8520 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8522 device = tmp_var;
8525 gsi = gsi_last_bb (new_bb);
8526 t = gimple_omp_target_data_arg (entry_stmt);
8527 if (t == NULL)
8529 t1 = size_zero_node;
8530 t2 = build_zero_cst (ptr_type_node);
8531 t3 = t2;
8532 t4 = t2;
8534 else
8536 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8537 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8538 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8539 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8540 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8543 gimple g;
8544 /* FIXME: This will be the address of
8545 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8546 symbol, as soon as the linker plugin is able to create it for us. */
8547 tree openmp_target = build_zero_cst (ptr_type_node);
8548 if (kind == GF_OMP_TARGET_KIND_REGION)
8550 tree fnaddr = build_fold_addr_expr (child_fn);
8551 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8552 device, fnaddr, openmp_target, t1, t2, t3, t4);
8554 else
8555 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8556 device, openmp_target, t1, t2, t3, t4);
8557 gimple_set_location (g, gimple_location (entry_stmt));
8558 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8559 if (kind != GF_OMP_TARGET_KIND_REGION)
8561 g = gsi_stmt (gsi);
8562 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8563 gsi_remove (&gsi, true);
8565 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8567 gsi = gsi_last_bb (region->exit);
8568 g = gsi_stmt (gsi);
8569 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8570 gsi_remove (&gsi, true);
8575 /* Expand the parallel region tree rooted at REGION. Expansion
8576 proceeds in depth-first order. Innermost regions are expanded
8577 first. This way, parallel regions that require a new function to
8578 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8579 internal dependencies in their body. */
8581 static void
8582 expand_omp (struct omp_region *region)
8584 while (region)
8586 location_t saved_location;
8587 gimple inner_stmt = NULL;
8589 /* First, determine whether this is a combined parallel+workshare
8590 region. */
8591 if (region->type == GIMPLE_OMP_PARALLEL)
8592 determine_parallel_type (region);
8594 if (region->type == GIMPLE_OMP_FOR
8595 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8596 inner_stmt = last_stmt (region->inner->entry);
8598 if (region->inner)
8599 expand_omp (region->inner);
8601 saved_location = input_location;
8602 if (gimple_has_location (last_stmt (region->entry)))
8603 input_location = gimple_location (last_stmt (region->entry));
8605 switch (region->type)
8607 case GIMPLE_OMP_PARALLEL:
8608 case GIMPLE_OMP_TASK:
8609 expand_omp_taskreg (region);
8610 break;
8612 case GIMPLE_OMP_FOR:
8613 expand_omp_for (region, inner_stmt);
8614 break;
8616 case GIMPLE_OMP_SECTIONS:
8617 expand_omp_sections (region);
8618 break;
8620 case GIMPLE_OMP_SECTION:
8621 /* Individual omp sections are handled together with their
8622 parent GIMPLE_OMP_SECTIONS region. */
8623 break;
8625 case GIMPLE_OMP_SINGLE:
8626 expand_omp_single (region);
8627 break;
8629 case GIMPLE_OMP_MASTER:
8630 case GIMPLE_OMP_TASKGROUP:
8631 case GIMPLE_OMP_ORDERED:
8632 case GIMPLE_OMP_CRITICAL:
8633 case GIMPLE_OMP_TEAMS:
8634 expand_omp_synch (region);
8635 break;
8637 case GIMPLE_OMP_ATOMIC_LOAD:
8638 expand_omp_atomic (region);
8639 break;
8641 case GIMPLE_OMP_TARGET:
8642 expand_omp_target (region);
8643 break;
8645 default:
8646 gcc_unreachable ();
8649 input_location = saved_location;
8650 region = region->next;
8655 /* Helper for build_omp_regions. Scan the dominator tree starting at
8656 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8657 true, the function ends once a single tree is built (otherwise, a whole
8658 forest of OMP constructs may be built). */
8660 static void
8661 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8662 bool single_tree)
8664 gimple_stmt_iterator gsi;
8665 gimple stmt;
8666 basic_block son;
8668 gsi = gsi_last_bb (bb);
8669 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8671 struct omp_region *region;
8672 enum gimple_code code;
8674 stmt = gsi_stmt (gsi);
8675 code = gimple_code (stmt);
8676 if (code == GIMPLE_OMP_RETURN)
8678 /* STMT is the return point out of region PARENT. Mark it
8679 as the exit point and make PARENT the immediately
8680 enclosing region. */
8681 gcc_assert (parent);
8682 region = parent;
8683 region->exit = bb;
8684 parent = parent->outer;
8686 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8688 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8689 GIMPLE_OMP_RETURN, but matches with
8690 GIMPLE_OMP_ATOMIC_LOAD. */
8691 gcc_assert (parent);
8692 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8693 region = parent;
8694 region->exit = bb;
8695 parent = parent->outer;
8698 else if (code == GIMPLE_OMP_CONTINUE)
8700 gcc_assert (parent);
8701 parent->cont = bb;
8703 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8705 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8706 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8709 else if (code == GIMPLE_OMP_TARGET
8710 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8711 new_omp_region (bb, code, parent);
8712 else
8714 /* Otherwise, this directive becomes the parent for a new
8715 region. */
8716 region = new_omp_region (bb, code, parent);
8717 parent = region;
8721 if (single_tree && !parent)
8722 return;
8724 for (son = first_dom_son (CDI_DOMINATORS, bb);
8725 son;
8726 son = next_dom_son (CDI_DOMINATORS, son))
8727 build_omp_regions_1 (son, parent, single_tree);
8730 /* Builds the tree of OMP regions rooted at ROOT, storing it in
8731 root_omp_region. */
8733 static void
8734 build_omp_regions_root (basic_block root)
8736 gcc_assert (root_omp_region == NULL);
8737 build_omp_regions_1 (root, NULL, true);
8738 gcc_assert (root_omp_region != NULL);
8741 /* Expands omp construct (and its subconstructs) starting in HEAD. */
8743 void
8744 omp_expand_local (basic_block head)
8746 build_omp_regions_root (head);
8747 if (dump_file && (dump_flags & TDF_DETAILS))
8749 fprintf (dump_file, "\nOMP region tree\n\n");
8750 dump_omp_region (dump_file, root_omp_region, 0);
8751 fprintf (dump_file, "\n");
8754 remove_exit_barriers (root_omp_region);
8755 expand_omp (root_omp_region);
8757 free_omp_regions ();
8760 /* Scan the CFG and build a tree of OMP regions, storing the root of
8761 the OMP region tree in root_omp_region. */
8763 static void
8764 build_omp_regions (void)
8766 gcc_assert (root_omp_region == NULL);
8767 calculate_dominance_info (CDI_DOMINATORS);
8768 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8771 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8773 static unsigned int
8774 execute_expand_omp (void)
8776 build_omp_regions ();
8778 if (!root_omp_region)
8779 return 0;
8781 if (dump_file)
8783 fprintf (dump_file, "\nOMP region tree\n\n");
8784 dump_omp_region (dump_file, root_omp_region, 0);
8785 fprintf (dump_file, "\n");
8788 remove_exit_barriers (root_omp_region);
8790 expand_omp (root_omp_region);
8792 cleanup_tree_cfg ();
8794 free_omp_regions ();
8796 return 0;
8799 /* OMP expansion -- the default pass, run before creation of SSA form. */
8801 namespace {
8803 const pass_data pass_data_expand_omp =
8805 GIMPLE_PASS, /* type */
8806 "ompexp", /* name */
8807 OPTGROUP_NONE, /* optinfo_flags */
8808 TV_NONE, /* tv_id */
8809 PROP_gimple_any, /* properties_required */
8810 0, /* properties_provided */
8811 0, /* properties_destroyed */
8812 0, /* todo_flags_start */
8813 0, /* todo_flags_finish */
8816 class pass_expand_omp : public gimple_opt_pass
8818 public:
8819 pass_expand_omp (gcc::context *ctxt)
8820 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8823 /* opt_pass methods: */
8824 virtual bool gate (function *)
8826 return ((flag_openmp != 0 || flag_openmp_simd != 0
8827 || flag_cilkplus != 0) && !seen_error ());
8830 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8832 }; // class pass_expand_omp
8834 } // anon namespace
8836 gimple_opt_pass *
8837 make_pass_expand_omp (gcc::context *ctxt)
8839 return new pass_expand_omp (ctxt);
8842 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8844 /* If ctx is a worksharing context inside of a cancellable parallel
8845 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
8846 and conditional branch to parallel's cancel_label to handle
8847 cancellation in the implicit barrier. */
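/* A sketch of the result: the GIMPLE_OMP_RETURN receives an artificial
   boolean LHS, which pass_expand_omp later turns into the _CANCEL
   flavour of the ending call (e.g. GOMP_sections_end_cancel above), and
   here we branch on it:

       if (lhs != 0) goto <cancel_label>; else goto <fallthru_label>;  */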
8849 static void
8850 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8852 gimple omp_return = gimple_seq_last_stmt (*body);
8853 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8854 if (gimple_omp_return_nowait_p (omp_return))
8855 return;
8856 if (ctx->outer
8857 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8858 && ctx->outer->cancellable)
8860 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8861 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8862 tree lhs = create_tmp_var (c_bool_type, NULL);
8863 gimple_omp_return_set_lhs (omp_return, lhs);
8864 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8865 gimple g = gimple_build_cond (NE_EXPR, lhs,
8866 fold_convert (c_bool_type,
8867 boolean_false_node),
8868 ctx->outer->cancel_label, fallthru_label);
8869 gimple_seq_add_stmt (body, g);
8870 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8874 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8875 CTX is the enclosing OMP context for the current statement. */
8877 static void
8878 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8880 tree block, control;
8881 gimple_stmt_iterator tgsi;
8882 gimple_omp_sections stmt;
8883 gimple t;
8884 gimple_bind new_stmt, bind;
8885 gimple_seq ilist, dlist, olist, new_body;
8887 stmt = as_a <gimple_omp_sections> (gsi_stmt (*gsi_p));
8889 push_gimplify_context ();
8891 dlist = NULL;
8892 ilist = NULL;
8893 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8894 &ilist, &dlist, ctx, NULL);
8896 new_body = gimple_omp_body (stmt);
8897 gimple_omp_set_body (stmt, NULL);
8898 tgsi = gsi_start (new_body);
8899 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8901 omp_context *sctx;
8902 gimple sec_start;
8904 sec_start = gsi_stmt (tgsi);
8905 sctx = maybe_lookup_ctx (sec_start);
8906 gcc_assert (sctx);
8908 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8909 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8910 GSI_CONTINUE_LINKING);
8911 gimple_omp_set_body (sec_start, NULL);
8913 if (gsi_one_before_end_p (tgsi))
8915 gimple_seq l = NULL;
8916 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8917 &l, ctx);
8918 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8919 gimple_omp_section_set_last (sec_start);
8922 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8923 GSI_CONTINUE_LINKING);
8926 block = make_node (BLOCK);
8927 bind = gimple_build_bind (NULL, new_body, block);
8929 olist = NULL;
8930 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8932 block = make_node (BLOCK);
8933 new_stmt = gimple_build_bind (NULL, NULL, block);
8934 gsi_replace (gsi_p, new_stmt, true);
8936 pop_gimplify_context (new_stmt);
8937 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8938 BLOCK_VARS (block) = gimple_bind_vars (bind);
8939 if (BLOCK_VARS (block))
8940 TREE_USED (block) = 1;
8942 new_body = NULL;
8943 gimple_seq_add_seq (&new_body, ilist);
8944 gimple_seq_add_stmt (&new_body, stmt);
8945 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8946 gimple_seq_add_stmt (&new_body, bind);
8948 control = create_tmp_var (unsigned_type_node, ".section");
8949 t = gimple_build_omp_continue (control, control);
8950 gimple_omp_sections_set_control (stmt, control);
8951 gimple_seq_add_stmt (&new_body, t);
8953 gimple_seq_add_seq (&new_body, olist);
8954 if (ctx->cancellable)
8955 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8956 gimple_seq_add_seq (&new_body, dlist);
8958 new_body = maybe_catch_exception (new_body);
8960 t = gimple_build_omp_return
8961 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8962 OMP_CLAUSE_NOWAIT));
8963 gimple_seq_add_stmt (&new_body, t);
8964 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8966 gimple_bind_set_body (new_stmt, new_body);
8970 /* A subroutine of lower_omp_single. Expand the simple form of
8971 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8973 if (GOMP_single_start ())
8974 BODY;
8975 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8977 FIXME. It may be better to delay expanding the logic of this until
8978 pass_expand_omp. The expanded logic may make the job more difficult
8979 for a synchronization analysis pass. */
8981 static void
8982 lower_omp_single_simple (gimple_omp_single single_stmt, gimple_seq *pre_p)
8984 location_t loc = gimple_location (single_stmt);
8985 tree tlabel = create_artificial_label (loc);
8986 tree flabel = create_artificial_label (loc);
8987 gimple call, cond;
8988 tree lhs, decl;
8990 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8991 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8992 call = gimple_build_call (decl, 0);
8993 gimple_call_set_lhs (call, lhs);
8994 gimple_seq_add_stmt (pre_p, call);
8996 cond = gimple_build_cond (EQ_EXPR, lhs,
8997 fold_convert_loc (loc, TREE_TYPE (lhs),
8998 boolean_true_node),
8999 tlabel, flabel);
9000 gimple_seq_add_stmt (pre_p, cond);
9001 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
9002 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
9003 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
9007 /* A subroutine of lower_omp_single. Expand the simple form of
9008 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
9010 #pragma omp single copyprivate (a, b, c)
9012 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
9014 {
9015   if ((copyout_p = GOMP_single_copy_start ()) == NULL)
9016     {
9017       BODY;
9018       copyout.a = a;
9019       copyout.b = b;
9020       copyout.c = c;
9021       GOMP_single_copy_end (&copyout);
9022     }
9023   else
9024     {
9025       a = copyout_p->a;
9026       b = copyout_p->b;
9027       c = copyout_p->c;
9028     }
9029   GOMP_barrier ();
9030 }
9032 FIXME. It may be better to delay expanding the logic of this until
9033 pass_expand_omp. The expanded logic may make the job more difficult
9034 for a synchronization analysis pass. */
9036 static void
9037 lower_omp_single_copy (gimple_omp_single single_stmt, gimple_seq *pre_p,
9038 omp_context *ctx)
9040 tree ptr_type, t, l0, l1, l2, bfn_decl;
9041 gimple_seq copyin_seq;
9042 location_t loc = gimple_location (single_stmt);
9044 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
9046 ptr_type = build_pointer_type (ctx->record_type);
9047 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
9049 l0 = create_artificial_label (loc);
9050 l1 = create_artificial_label (loc);
9051 l2 = create_artificial_label (loc);
9053 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
9054 t = build_call_expr_loc (loc, bfn_decl, 0);
9055 t = fold_convert_loc (loc, ptr_type, t);
9056 gimplify_assign (ctx->receiver_decl, t, pre_p);
9058 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
9059 build_int_cst (ptr_type, 0));
9060 t = build3 (COND_EXPR, void_type_node, t,
9061 build_and_jump (&l0), build_and_jump (&l1));
9062 gimplify_and_add (t, pre_p);
9064 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
9066 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
9068 copyin_seq = NULL;
9069 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
9070 &copyin_seq, ctx);
9072 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9073 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
9074 t = build_call_expr_loc (loc, bfn_decl, 1, t);
9075 gimplify_and_add (t, pre_p);
9077 t = build_and_jump (&l2);
9078 gimplify_and_add (t, pre_p);
9080 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
9082 gimple_seq_add_seq (pre_p, copyin_seq);
9084 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
9088 /* Expand code for an OpenMP single directive. */
9090 static void
9091 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9093 tree block;
9094 gimple t;
9095 gimple_omp_single single_stmt = as_a <gimple_omp_single> (gsi_stmt (*gsi_p));
9096 gimple_bind bind;
9097 gimple_seq bind_body, bind_body_tail = NULL, dlist;
9099 push_gimplify_context ();
9101 block = make_node (BLOCK);
9102 bind = gimple_build_bind (NULL, NULL, block);
9103 gsi_replace (gsi_p, bind, true);
9104 bind_body = NULL;
9105 dlist = NULL;
9106 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
9107 &bind_body, &dlist, ctx, NULL);
9108 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
9110 gimple_seq_add_stmt (&bind_body, single_stmt);
9112 if (ctx->record_type)
9113 lower_omp_single_copy (single_stmt, &bind_body, ctx);
9114 else
9115 lower_omp_single_simple (single_stmt, &bind_body);
9117 gimple_omp_set_body (single_stmt, NULL);
9119 gimple_seq_add_seq (&bind_body, dlist);
9121 bind_body = maybe_catch_exception (bind_body);
9123 t = gimple_build_omp_return
9124 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
9125 OMP_CLAUSE_NOWAIT));
9126 gimple_seq_add_stmt (&bind_body_tail, t);
9127 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
9128 if (ctx->record_type)
9130 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
9131 tree clobber = build_constructor (ctx->record_type, NULL);
9132 TREE_THIS_VOLATILE (clobber) = 1;
9133 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
9134 clobber), GSI_SAME_STMT);
9136 gimple_seq_add_seq (&bind_body, bind_body_tail);
9137 gimple_bind_set_body (bind, bind_body);
9139 pop_gimplify_context (bind);
9141 gimple_bind_append_vars (bind, ctx->block_vars);
9142 BLOCK_VARS (block) = ctx->block_vars;
9143 if (BLOCK_VARS (block))
9144 TREE_USED (block) = 1;
9148 /* Expand code for an OpenMP master directive. */
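/* A sketch of the lowering: the body is guarded by a thread check built
   from BUILT_IN_OMP_GET_THREAD_NUM, roughly

       if (omp_get_thread_num () == 0)
         BODY;  */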
9150 static void
9151 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9153 tree block, lab = NULL, x, bfn_decl;
9154 gimple stmt = gsi_stmt (*gsi_p);
9155 gimple_bind bind;
9156 location_t loc = gimple_location (stmt);
9157 gimple_seq tseq;
9159 push_gimplify_context ();
9161 block = make_node (BLOCK);
9162 bind = gimple_build_bind (NULL, NULL, block);
9163 gsi_replace (gsi_p, bind, true);
9164 gimple_bind_add_stmt (bind, stmt);
9166 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9167 x = build_call_expr_loc (loc, bfn_decl, 0);
9168 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
9169 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
9170 tseq = NULL;
9171 gimplify_and_add (x, &tseq);
9172 gimple_bind_add_seq (bind, tseq);
9174 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9175 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9176 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9177 gimple_omp_set_body (stmt, NULL);
9179 gimple_bind_add_stmt (bind, gimple_build_label (lab));
9181 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9183 pop_gimplify_context (bind);
9185 gimple_bind_append_vars (bind, ctx->block_vars);
9186 BLOCK_VARS (block) = ctx->block_vars;
9190 /* Expand code for an OpenMP taskgroup directive. */
9192 static void
9193 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9195 gimple stmt = gsi_stmt (*gsi_p);
9196 gimple_call x;
9197 gimple_bind bind;
9198 tree block = make_node (BLOCK);
9200 bind = gimple_build_bind (NULL, NULL, block);
9201 gsi_replace (gsi_p, bind, true);
9202 gimple_bind_add_stmt (bind, stmt);
9204 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
9205 0);
9206 gimple_bind_add_stmt (bind, x);
9208 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9209 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9210 gimple_omp_set_body (stmt, NULL);
9212 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9214 gimple_bind_append_vars (bind, ctx->block_vars);
9215 BLOCK_VARS (block) = ctx->block_vars;
9219 /* Expand code for an OpenMP ordered directive. */
9221 static void
9222 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9224 tree block;
9225 gimple stmt = gsi_stmt (*gsi_p);
9226 gimple_call x;
9227 gimple_bind bind;
9229 push_gimplify_context ();
9231 block = make_node (BLOCK);
9232 bind = gimple_build_bind (NULL, NULL, block);
9233 gsi_replace (gsi_p, bind, true);
9234 gimple_bind_add_stmt (bind, stmt);
9236 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
9237 0);
9238 gimple_bind_add_stmt (bind, x);
9240 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9241 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9242 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9243 gimple_omp_set_body (stmt, NULL);
9245 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
9246 gimple_bind_add_stmt (bind, x);
9248 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9250 pop_gimplify_context (bind);
9252 gimple_bind_append_vars (bind, ctx->block_vars);
9253 BLOCK_VARS (block) = gimple_bind_vars (bind);
9257 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
9258 substitution of a couple of function calls. But the NAMED case
9259 requires that languages coordinate a symbol name. It is therefore
9260 best put here in common code. */
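/* For example (illustrative), #pragma omp critical (xyzzy) locks the
   common symbol ".gomp_critical_user_xyzzy" via
   GOMP_critical_name_start/_end, so every translation unit using the
   same name ends up sharing one mutex.  */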
9262 static GTY((param1_is (tree), param2_is (tree)))
9263 splay_tree critical_name_mutexes;
9265 static void
9266 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9268 tree block;
9269 tree name, lock, unlock;
9270 gimple_omp_critical stmt = as_a <gimple_omp_critical> (gsi_stmt (*gsi_p));
9271 gimple_bind bind;
9272 location_t loc = gimple_location (stmt);
9273 gimple_seq tbody;
9275 name = gimple_omp_critical_name (stmt);
9276 if (name)
9278 tree decl;
9279 splay_tree_node n;
9281 if (!critical_name_mutexes)
9282 critical_name_mutexes
9283 = splay_tree_new_ggc (splay_tree_compare_pointers,
9284 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9285 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9287 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
9288 if (n == NULL)
9290 char *new_str;
9292 decl = create_tmp_var_raw (ptr_type_node, NULL);
9294 new_str = ACONCAT ((".gomp_critical_user_",
9295 IDENTIFIER_POINTER (name), NULL));
9296 DECL_NAME (decl) = get_identifier (new_str);
9297 TREE_PUBLIC (decl) = 1;
9298 TREE_STATIC (decl) = 1;
9299 DECL_COMMON (decl) = 1;
9300 DECL_ARTIFICIAL (decl) = 1;
9301 DECL_IGNORED_P (decl) = 1;
9302 varpool_node::finalize_decl (decl);
9304 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
9305 (splay_tree_value) decl);
9307 else
9308 decl = (tree) n->value;
9310 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
9311 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
9313 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
9314 unlock = build_call_expr_loc (loc, unlock, 1,
9315 build_fold_addr_expr_loc (loc, decl));
9317 else
9319 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
9320 lock = build_call_expr_loc (loc, lock, 0);
9322 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
9323 unlock = build_call_expr_loc (loc, unlock, 0);
9326 push_gimplify_context ();
9328 block = make_node (BLOCK);
9329 bind = gimple_build_bind (NULL, NULL, block);
9330 gsi_replace (gsi_p, bind, true);
9331 gimple_bind_add_stmt (bind, stmt);
9333 tbody = gimple_bind_body (bind);
9334 gimplify_and_add (lock, &tbody);
9335 gimple_bind_set_body (bind, tbody);
9337 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9338 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9339 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9340 gimple_omp_set_body (stmt, NULL);
9342 tbody = gimple_bind_body (bind);
9343 gimplify_and_add (unlock, &tbody);
9344 gimple_bind_set_body (bind, tbody);
9346 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9348 pop_gimplify_context (bind);
9349 gimple_bind_append_vars (bind, ctx->block_vars);
9350 BLOCK_VARS (block) = gimple_bind_vars (bind);
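/* Illustrative sketch (not from the original source) of both shapes
   produced by lower_omp_critical:

       #pragma omp critical           GOMP_critical_start ();
         <body>               ==>     <lowered body>
                                      GOMP_critical_end ();

       #pragma omp critical (foo)     GOMP_critical_name_start (&.gomp_critical_user_foo);
         <body>               ==>     <lowered body>
                                      GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common, TREE_PUBLIC mutex variable
   created once per name above.  */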
9354 /* A subroutine of lower_omp_for. Generate code to emit the predicate
9355 for a lastprivate clause. Given a loop control predicate of (V
9356 cond N2), we gate the clause on (!(V cond N2)). The lowered form
9357 is appended to *DLIST; iterator initialization is appended to
9358 *BODY_P. */
9360 static void
9361 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
9362 gimple_seq *dlist, struct omp_context *ctx)
9364 tree clauses, cond, vinit;
9365 enum tree_code cond_code;
9366 gimple_seq stmts;
9368 cond_code = fd->loop.cond_code;
9369 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
9371 /* When possible, use a strict equality expression.  This can let VRP-style
9372 optimizations deduce the value and remove a copy.  */
9373 if (tree_fits_shwi_p (fd->loop.step))
9375 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
9376 if (step == 1 || step == -1)
9377 cond_code = EQ_EXPR;
9380 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
9382 clauses = gimple_omp_for_clauses (fd->for_stmt);
9383 stmts = NULL;
9384 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
9385 if (!gimple_seq_empty_p (stmts))
9387 gimple_seq_add_seq (&stmts, *dlist);
9388 *dlist = stmts;
9390 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
9391 vinit = fd->loop.n1;
9392 if (cond_code == EQ_EXPR
9393 && tree_fits_shwi_p (fd->loop.n2)
9394 && ! integer_zerop (fd->loop.n2))
9395 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
9396 else
9397 vinit = unshare_expr (vinit);
9399 /* Initialize the iterator variable, so that threads that don't execute
9400 any iterations don't execute the lastprivate clauses by accident. */
9401 gimplify_assign (fd->loop.v, vinit, body_p);
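/* Example (a sketch, not part of the original source): for

       #pragma omp for lastprivate (x)
       for (V = N1; V < N2; V += STEP)

   the lastprivate copy-back appended to *DLIST is gated on !(V < N2):

       if (V >= N2)        (or V == N2 when STEP is +-1)
         <copy the private x back to the original x>

   and *BODY_P gets "V = N1" (or "V = 0" in the EQ_EXPR case) so that
   threads which run no iterations never satisfy the predicate.  */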
9406 /* Lower code for an OpenMP loop directive. */
9408 static void
9409 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9411 tree *rhs_p, block;
9412 struct omp_for_data fd, *fdp = NULL;
9413 gimple_omp_for stmt = as_a <gimple_omp_for> (gsi_stmt (*gsi_p));
9414 gimple_bind new_stmt;
9415 gimple_seq omp_for_body, body, dlist;
9416 size_t i;
9418 push_gimplify_context ();
9420 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
9422 block = make_node (BLOCK);
9423 new_stmt = gimple_build_bind (NULL, NULL, block);
9424 /* Replace at gsi right away, so that 'stmt' is no longer a member
9425 of a sequence, as we're going to add it to a different
9426 one below.  */
9427 gsi_replace (gsi_p, new_stmt, true);
9429 /* Move declaration of temporaries in the loop body before we make
9430 it go away. */
9431 omp_for_body = gimple_omp_body (stmt);
9432 if (!gimple_seq_empty_p (omp_for_body)
9433 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
9435 gimple_bind inner_bind =
9436 as_a <gimple_bind> (gimple_seq_first_stmt (omp_for_body));
9437 tree vars = gimple_bind_vars (inner_bind);
9438 gimple_bind_append_vars (new_stmt, vars);
9439 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
9440 keep them on the inner_bind and its block.  */
9441 gimple_bind_set_vars (inner_bind, NULL_TREE);
9442 if (gimple_bind_block (inner_bind))
9443 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
9446 if (gimple_omp_for_combined_into_p (stmt))
9448 extract_omp_for_data (stmt, &fd, NULL);
9449 fdp = &fd;
9451 /* We need two temporaries with fd.iter_type (istart/iend)
9452 and then (fd.collapse - 1) temporaries with the same
9453 type for count2 ... countN-1 vars if not constant. */
9454 size_t count = 2;
9455 tree type = fd.iter_type;
9456 if (fd.collapse > 1
9457 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
9458 count += fd.collapse - 1;
9459 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
9460 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
9461 tree clauses = *pc;
9462 if (parallel_for)
9463 outerc
9464 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
9465 OMP_CLAUSE__LOOPTEMP_);
9466 for (i = 0; i < count; i++)
9468 tree temp;
9469 if (parallel_for)
9471 gcc_assert (outerc);
9472 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
9473 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
9474 OMP_CLAUSE__LOOPTEMP_);
9476 else
9478 temp = create_tmp_var (type, NULL);
9479 insert_decl_map (&ctx->outer->cb, temp, temp);
9481 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
9482 OMP_CLAUSE_DECL (*pc) = temp;
9483 pc = &OMP_CLAUSE_CHAIN (*pc);
9485 *pc = clauses;
9488 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9489 dlist = NULL;
9490 body = NULL;
9491 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
9492 fdp);
9493 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
9495 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9497 /* Lower the header expressions. At this point, we can assume that
9498 the header is of the form:
9500 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
9502 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
9503 using the .omp_data_s mapping, if needed. */
9504 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
9506 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
9507 if (!is_gimple_min_invariant (*rhs_p))
9508 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9510 rhs_p = gimple_omp_for_final_ptr (stmt, i);
9511 if (!is_gimple_min_invariant (*rhs_p))
9512 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9514 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
9515 if (!is_gimple_min_invariant (*rhs_p))
9516 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9519 /* Once lowered, extract the bounds and clauses. */
9520 extract_omp_for_data (stmt, &fd, NULL);
9522 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
9524 gimple_seq_add_stmt (&body, stmt);
9525 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
9527 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
9528 fd.loop.v));
9530 /* After the loop, add exit clauses. */
9531 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
9533 if (ctx->cancellable)
9534 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
9536 gimple_seq_add_seq (&body, dlist);
9538 body = maybe_catch_exception (body);
9540 /* Region exit marker goes at the end of the loop body. */
9541 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
9542 maybe_add_implicit_barrier_cancel (ctx, &body);
9543 pop_gimplify_context (new_stmt);
9545 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9546 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
9547 if (BLOCK_VARS (block))
9548 TREE_USED (block) = 1;
9550 gimple_bind_set_body (new_stmt, body);
9551 gimple_omp_set_body (stmt, NULL);
9552 gimple_omp_for_set_pre_body (stmt, NULL);
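/* A sketch (not in the original source) of the sequence built above:

       <lowered rec-input clauses, pre-body, VAL1/VAL2/VAL3 temporaries>
       <lastprivate iterator initialization>
       GIMPLE_OMP_FOR                 (original stmt, body/pre-body now NULL)
       <lowered loop body>
       GIMPLE_OMP_CONTINUE (V, V)
       <reduction exit clauses>
       <dlist: lastprivate/destructor sequence>
       GIMPLE_OMP_RETURN (nowait-p)

   all placed into NEW_STMT, the GIMPLE_BIND that replaced the directive.  */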
9555 /* Callback for walk_stmts.  Check whether the walked parallel body consists
9556 of exactly one GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS statement and nothing else; *WI->INFO ends up 1 in that case, -1 otherwise.  */
9558 static tree
9559 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9560 bool *handled_ops_p,
9561 struct walk_stmt_info *wi)
9563 int *info = (int *) wi->info;
9564 gimple stmt = gsi_stmt (*gsi_p);
9566 *handled_ops_p = true;
9567 switch (gimple_code (stmt))
9569 WALK_SUBSTMTS;
9571 case GIMPLE_OMP_FOR:
9572 case GIMPLE_OMP_SECTIONS:
9573 *info = *info == 0 ? 1 : -1;
9574 break;
9575 default:
9576 *info = -1;
9577 break;
9579 return NULL;
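/* For example (illustration only), in

       #pragma omp parallel
       #pragma omp for
       for (...) ...

   the walk over the parallel body sees exactly one workshare, *INFO ends
   up 1, and lower_omp_taskreg marks the parallel as combined; any other
   statement in the body forces *INFO to -1.  */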
9582 struct omp_taskcopy_context
9584 /* This field must be at the beginning, as we do "inheritance": Some
9585 callback functions for tree-inline.c (e.g., omp_copy_decl)
9586 receive a copy_body_data pointer that is up-casted to an
9587 omp_taskcopy_context pointer.  */
9588 copy_body_data cb;
9589 omp_context *ctx;
9592 static tree
9593 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9595 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9597 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9598 return create_tmp_var (TREE_TYPE (var), NULL);
9600 return var;
9603 static tree
9604 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9606 tree name, new_fields = NULL, type, f;
9608 type = lang_hooks.types.make_type (RECORD_TYPE);
9609 name = DECL_NAME (TYPE_NAME (orig_type));
9610 name = build_decl (gimple_location (tcctx->ctx->stmt),
9611 TYPE_DECL, name, type);
9612 TYPE_NAME (type) = name;
9614 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9616 tree new_f = copy_node (f);
9617 DECL_CONTEXT (new_f) = type;
9618 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9619 TREE_CHAIN (new_f) = new_fields;
9620 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9621 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9622 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9623 &tcctx->cb, NULL);
9624 new_fields = new_f;
9625 tcctx->cb.decl_map->put (f, new_f);
9627 TYPE_FIELDS (type) = nreverse (new_fields);
9628 layout_type (type);
9629 return type;
9632 /* Create the task copy function (copyfn), which copies shared variable pointers and copy-constructs firstprivate variables into the task's data record.  */
9634 static void
9635 create_task_copyfn (gimple_omp_task task_stmt, omp_context *ctx)
9637 struct function *child_cfun;
9638 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9639 tree record_type, srecord_type, bind, list;
9640 bool record_needs_remap = false, srecord_needs_remap = false;
9641 splay_tree_node n;
9642 struct omp_taskcopy_context tcctx;
9643 location_t loc = gimple_location (task_stmt);
9645 child_fn = gimple_omp_task_copy_fn (task_stmt);
9646 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9647 gcc_assert (child_cfun->cfg == NULL);
9648 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9650 /* Reset DECL_CONTEXT on function arguments. */
9651 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9652 DECL_CONTEXT (t) = child_fn;
9654 /* Populate the function. */
9655 push_gimplify_context ();
9656 push_cfun (child_cfun);
9658 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9659 TREE_SIDE_EFFECTS (bind) = 1;
9660 list = NULL;
9661 DECL_SAVED_TREE (child_fn) = bind;
9662 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9664 /* Remap src and dst argument types if needed. */
9665 record_type = ctx->record_type;
9666 srecord_type = ctx->srecord_type;
9667 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9668 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9670 record_needs_remap = true;
9671 break;
9673 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9674 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9676 srecord_needs_remap = true;
9677 break;
9680 if (record_needs_remap || srecord_needs_remap)
9682 memset (&tcctx, '\0', sizeof (tcctx));
9683 tcctx.cb.src_fn = ctx->cb.src_fn;
9684 tcctx.cb.dst_fn = child_fn;
9685 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
9686 gcc_checking_assert (tcctx.cb.src_node);
9687 tcctx.cb.dst_node = tcctx.cb.src_node;
9688 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9689 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9690 tcctx.cb.eh_lp_nr = 0;
9691 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9692 tcctx.cb.decl_map = new hash_map<tree, tree>;
9693 tcctx.ctx = ctx;
9695 if (record_needs_remap)
9696 record_type = task_copyfn_remap_type (&tcctx, record_type);
9697 if (srecord_needs_remap)
9698 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9700 else
9701 tcctx.cb.decl_map = NULL;
9703 arg = DECL_ARGUMENTS (child_fn);
9704 TREE_TYPE (arg) = build_pointer_type (record_type);
9705 sarg = DECL_CHAIN (arg);
9706 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9708 /* First pass: initialize temporaries used in record_type and srecord_type
9709 sizes and field offsets. */
9710 if (tcctx.cb.decl_map)
9711 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9712 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9714 tree *p;
9716 decl = OMP_CLAUSE_DECL (c);
9717 p = tcctx.cb.decl_map->get (decl);
9718 if (p == NULL)
9719 continue;
9720 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9721 sf = (tree) n->value;
9722 sf = *tcctx.cb.decl_map->get (sf);
9723 src = build_simple_mem_ref_loc (loc, sarg);
9724 src = omp_build_component_ref (src, sf);
9725 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9726 append_to_statement_list (t, &list);
9729 /* Second pass: copy shared var pointers and copy-construct non-VLA
9730 firstprivate vars. */
9731 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9732 switch (OMP_CLAUSE_CODE (c))
9734 case OMP_CLAUSE_SHARED:
9735 decl = OMP_CLAUSE_DECL (c);
9736 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9737 if (n == NULL)
9738 break;
9739 f = (tree) n->value;
9740 if (tcctx.cb.decl_map)
9741 f = *tcctx.cb.decl_map->get (f);
9742 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9743 sf = (tree) n->value;
9744 if (tcctx.cb.decl_map)
9745 sf = *tcctx.cb.decl_map->get (sf);
9746 src = build_simple_mem_ref_loc (loc, sarg);
9747 src = omp_build_component_ref (src, sf);
9748 dst = build_simple_mem_ref_loc (loc, arg);
9749 dst = omp_build_component_ref (dst, f);
9750 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9751 append_to_statement_list (t, &list);
9752 break;
9753 case OMP_CLAUSE_FIRSTPRIVATE:
9754 decl = OMP_CLAUSE_DECL (c);
9755 if (is_variable_sized (decl))
9756 break;
9757 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9758 if (n == NULL)
9759 break;
9760 f = (tree) n->value;
9761 if (tcctx.cb.decl_map)
9762 f = *tcctx.cb.decl_map->get (f);
9763 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9764 if (n != NULL)
9766 sf = (tree) n->value;
9767 if (tcctx.cb.decl_map)
9768 sf = *tcctx.cb.decl_map->get (sf);
9769 src = build_simple_mem_ref_loc (loc, sarg);
9770 src = omp_build_component_ref (src, sf);
9771 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9772 src = build_simple_mem_ref_loc (loc, src);
9774 else
9775 src = decl;
9776 dst = build_simple_mem_ref_loc (loc, arg);
9777 dst = omp_build_component_ref (dst, f);
9778 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9779 append_to_statement_list (t, &list);
9780 break;
9781 case OMP_CLAUSE_PRIVATE:
9782 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9783 break;
9784 decl = OMP_CLAUSE_DECL (c);
9785 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9786 f = (tree) n->value;
9787 if (tcctx.cb.decl_map)
9788 f = *tcctx.cb.decl_map->get (f);
9789 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9790 if (n != NULL)
9792 sf = (tree) n->value;
9793 if (tcctx.cb.decl_map)
9794 sf = *tcctx.cb.decl_map->get (sf);
9795 src = build_simple_mem_ref_loc (loc, sarg);
9796 src = omp_build_component_ref (src, sf);
9797 if (use_pointer_for_field (decl, NULL))
9798 src = build_simple_mem_ref_loc (loc, src);
9800 else
9801 src = decl;
9802 dst = build_simple_mem_ref_loc (loc, arg);
9803 dst = omp_build_component_ref (dst, f);
9804 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9805 append_to_statement_list (t, &list);
9806 break;
9807 default:
9808 break;
9811 /* Last pass: handle VLA firstprivates. */
9812 if (tcctx.cb.decl_map)
9813 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9814 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9816 tree ind, ptr, df;
9818 decl = OMP_CLAUSE_DECL (c);
9819 if (!is_variable_sized (decl))
9820 continue;
9821 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9822 if (n == NULL)
9823 continue;
9824 f = (tree) n->value;
9825 f = *tcctx.cb.decl_map->get (f);
9826 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9827 ind = DECL_VALUE_EXPR (decl);
9828 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9829 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9830 n = splay_tree_lookup (ctx->sfield_map,
9831 (splay_tree_key) TREE_OPERAND (ind, 0));
9832 sf = (tree) n->value;
9833 sf = *tcctx.cb.decl_map->get (sf);
9834 src = build_simple_mem_ref_loc (loc, sarg);
9835 src = omp_build_component_ref (src, sf);
9836 src = build_simple_mem_ref_loc (loc, src);
9837 dst = build_simple_mem_ref_loc (loc, arg);
9838 dst = omp_build_component_ref (dst, f);
9839 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9840 append_to_statement_list (t, &list);
9841 n = splay_tree_lookup (ctx->field_map,
9842 (splay_tree_key) TREE_OPERAND (ind, 0));
9843 df = (tree) n->value;
9844 df = *tcctx.cb.decl_map->get (df);
9845 ptr = build_simple_mem_ref_loc (loc, arg);
9846 ptr = omp_build_component_ref (ptr, df);
9847 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9848 build_fold_addr_expr_loc (loc, dst));
9849 append_to_statement_list (t, &list);
9852 t = build1 (RETURN_EXPR, void_type_node, NULL);
9853 append_to_statement_list (t, &list);
9855 if (tcctx.cb.decl_map)
9856 delete tcctx.cb.decl_map;
9857 pop_gimplify_context (NULL);
9858 BIND_EXPR_BODY (bind) = list;
9859 pop_cfun ();
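/* A rough sketch (hypothetical field names, not from the original source)
   of the copyfn body built above for

       #pragma omp task shared (s) firstprivate (fp)

       void .omp_task_copyfn (struct .omp_data_t *dst, struct .omp_data_s *src)
       {
         dst->s = src->s;                  (shared: copy the pointer)
         dst->fp = copy-ctor (src->fp);    (firstprivate: copy construct)
       }

   VLA firstprivates are handled in the last pass: the data is
   copy-constructed and the corresponding pointer field re-pointed at the
   new copy.  */

/* Lower the OMP_CLAUSE_DEPEND clauses of task STMT into an array of
   addresses handed to the runtime; its initialization is appended to
   *ISEQ and the final clobber of the array to *OSEQ.  */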
9862 static void
9863 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9865 tree c, clauses;
9866 gimple g;
9867 size_t n_in = 0, n_out = 0, idx = 2, i;
9869 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9870 OMP_CLAUSE_DEPEND);
9871 gcc_assert (clauses);
9872 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9873 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9874 switch (OMP_CLAUSE_DEPEND_KIND (c))
9876 case OMP_CLAUSE_DEPEND_IN:
9877 n_in++;
9878 break;
9879 case OMP_CLAUSE_DEPEND_OUT:
9880 case OMP_CLAUSE_DEPEND_INOUT:
9881 n_out++;
9882 break;
9883 default:
9884 gcc_unreachable ();
9886 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9887 tree array = create_tmp_var (type, NULL);
9888 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9889 NULL_TREE);
9890 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9891 gimple_seq_add_stmt (iseq, g);
9892 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9893 NULL_TREE);
9894 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9895 gimple_seq_add_stmt (iseq, g);
9896 for (i = 0; i < 2; i++)
9898 if ((i ? n_in : n_out) == 0)
9899 continue;
9900 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9901 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9902 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9904 tree t = OMP_CLAUSE_DECL (c);
9905 t = fold_convert (ptr_type_node, t);
9906 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9907 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9908 NULL_TREE, NULL_TREE);
9909 g = gimple_build_assign (r, t);
9910 gimple_seq_add_stmt (iseq, g);
9913 tree *p = gimple_omp_task_clauses_ptr (stmt);
9914 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9915 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9916 OMP_CLAUSE_CHAIN (c) = *p;
9917 *p = c;
9918 tree clobber = build_constructor (type, NULL);
9919 TREE_THIS_VOLATILE (clobber) = 1;
9920 g = gimple_build_assign (array, clobber);
9921 gimple_seq_add_stmt (oseq, g);
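/* Illustration (not in the original source): for

       #pragma omp task depend(out: a) depend(in: b, c)

   the array built above is laid out as

       array[0] = 3;     number of depend clauses (n_in + n_out)
       array[1] = 1;     number of out/inout entries
       array[2] = &a;    out/inout addresses first
       array[3] = &b;    then the in addresses
       array[4] = &c;

   and an artificial OMP_CLAUSE_DEPEND pointing at &array is prepended to
   the task's clause chain.  */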
9924 /* Lower the OpenMP parallel or task directive in the current statement
9925 in GSI_P. CTX holds context information for the directive. */
9927 static void
9928 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9930 tree clauses;
9931 tree child_fn, t;
9932 gimple stmt = gsi_stmt (*gsi_p);
9933 gimple_bind par_bind, bind, dep_bind = NULL;
9934 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9935 location_t loc = gimple_location (stmt);
9937 clauses = gimple_omp_taskreg_clauses (stmt);
9938 par_bind =
9939 as_a <gimple_bind> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
9940 par_body = gimple_bind_body (par_bind);
9941 child_fn = ctx->cb.dst_fn;
9942 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9943 && !gimple_omp_parallel_combined_p (stmt))
9945 struct walk_stmt_info wi;
9946 int ws_num = 0;
9948 memset (&wi, 0, sizeof (wi));
9949 wi.info = &ws_num;
9950 wi.val_only = true;
9951 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9952 if (ws_num == 1)
9953 gimple_omp_parallel_set_combined_p (stmt, true);
9955 gimple_seq dep_ilist = NULL;
9956 gimple_seq dep_olist = NULL;
9957 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9958 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9960 push_gimplify_context ();
9961 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9962 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9965 if (ctx->srecord_type)
9966 create_task_copyfn (as_a <gimple_omp_task> (stmt), ctx);
9968 push_gimplify_context ();
9970 par_olist = NULL;
9971 par_ilist = NULL;
9972 par_rlist = NULL;
9973 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9974 lower_omp (&par_body, ctx);
9975 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9976 lower_reduction_clauses (clauses, &par_rlist, ctx);
9978 /* Declare all the variables created by mapping and the variables
9979 declared in the scope of the parallel body. */
9980 record_vars_into (ctx->block_vars, child_fn);
9981 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9983 if (ctx->record_type)
9985 ctx->sender_decl
9986 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9987 : ctx->record_type, ".omp_data_o");
9988 DECL_NAMELESS (ctx->sender_decl) = 1;
9989 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9990 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9993 olist = NULL;
9994 ilist = NULL;
9995 lower_send_clauses (clauses, &ilist, &olist, ctx);
9996 lower_send_shared_vars (&ilist, &olist, ctx);
9998 if (ctx->record_type)
10000 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
10001 TREE_THIS_VOLATILE (clobber) = 1;
10002 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
10003 clobber));
10006 /* Once all the expansions are done, sequence all the different
10007 fragments inside gimple_omp_body. */
10009 new_body = NULL;
10011 if (ctx->record_type)
10013 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10014 /* fixup_child_record_type might have changed receiver_decl's type. */
10015 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
10016 gimple_seq_add_stmt (&new_body,
10017 gimple_build_assign (ctx->receiver_decl, t));
10020 gimple_seq_add_seq (&new_body, par_ilist);
10021 gimple_seq_add_seq (&new_body, par_body);
10022 gimple_seq_add_seq (&new_body, par_rlist);
10023 if (ctx->cancellable)
10024 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
10025 gimple_seq_add_seq (&new_body, par_olist);
10026 new_body = maybe_catch_exception (new_body);
10027 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10028 gimple_omp_set_body (stmt, new_body);
10030 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
10031 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
10032 gimple_bind_add_seq (bind, ilist);
10033 gimple_bind_add_stmt (bind, stmt);
10034 gimple_bind_add_seq (bind, olist);
10036 pop_gimplify_context (NULL);
10038 if (dep_bind)
10040 gimple_bind_add_seq (dep_bind, dep_ilist);
10041 gimple_bind_add_stmt (dep_bind, bind);
10042 gimple_bind_add_seq (dep_bind, dep_olist);
10043 pop_gimplify_context (dep_bind);
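/* A sketch (not part of the original source) of the final shape:

       GIMPLE_BIND
         <ilist: stores of sent values into .omp_data_o fields>
         GIMPLE_OMP_PARALLEL / GIMPLE_OMP_TASK, whose body is now
             .omp_data_i = &.omp_data_o;   (receiver setup, if any)
             <par_ilist> <par_body> <par_rlist> <par_olist>
             GIMPLE_OMP_RETURN
         <olist: copy-back code and the .omp_data_o clobber>

   with the whole thing nested inside DEP_BIND when depend clauses were
   present.  */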
10047 /* Lower the OpenMP target directive in the current statement
10048 in GSI_P. CTX holds context information for the directive. */
10050 static void
10051 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10053 tree clauses;
10054 tree child_fn, t, c;
10055 gimple_omp_target stmt = as_a <gimple_omp_target> (gsi_stmt (*gsi_p));
10056 gimple_bind tgt_bind = NULL, bind;
10057 gimple_seq tgt_body = NULL, olist, ilist, new_body;
10058 location_t loc = gimple_location (stmt);
10059 int kind = gimple_omp_target_kind (stmt);
10060 unsigned int map_cnt = 0;
10062 clauses = gimple_omp_target_clauses (stmt);
10063 if (kind == GF_OMP_TARGET_KIND_REGION)
10065 tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
10066 tgt_body = gimple_bind_body (tgt_bind);
10068 else if (kind == GF_OMP_TARGET_KIND_DATA)
10069 tgt_body = gimple_omp_body (stmt);
10070 child_fn = ctx->cb.dst_fn;
10072 push_gimplify_context ();
10074 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10075 switch (OMP_CLAUSE_CODE (c))
10077 tree var, x;
10079 default:
10080 break;
10081 case OMP_CLAUSE_MAP:
10082 case OMP_CLAUSE_TO:
10083 case OMP_CLAUSE_FROM:
10084 var = OMP_CLAUSE_DECL (c);
10085 if (!DECL_P (var))
10087 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
10088 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10089 map_cnt++;
10090 continue;
10093 if (DECL_SIZE (var)
10094 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
10096 tree var2 = DECL_VALUE_EXPR (var);
10097 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
10098 var2 = TREE_OPERAND (var2, 0);
10099 gcc_assert (DECL_P (var2));
10100 var = var2;
10103 if (!maybe_lookup_field (var, ctx))
10104 continue;
10106 if (kind == GF_OMP_TARGET_KIND_REGION)
10108 x = build_receiver_ref (var, true, ctx);
10109 tree new_var = lookup_decl (var, ctx);
10110 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10111 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10112 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10113 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
10114 x = build_simple_mem_ref (x);
10115 SET_DECL_VALUE_EXPR (new_var, x);
10116 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
10118 map_cnt++;
10121 if (kind == GF_OMP_TARGET_KIND_REGION)
10123 target_nesting_level++;
10124 lower_omp (&tgt_body, ctx);
10125 target_nesting_level--;
10127 else if (kind == GF_OMP_TARGET_KIND_DATA)
10128 lower_omp (&tgt_body, ctx);
10130 if (kind == GF_OMP_TARGET_KIND_REGION)
10132 /* Declare all the variables created by mapping and the variables
10133 declared in the scope of the target body. */
10134 record_vars_into (ctx->block_vars, child_fn);
10135 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
10138 olist = NULL;
10139 ilist = NULL;
10140 if (ctx->record_type)
10142 ctx->sender_decl
10143 = create_tmp_var (ctx->record_type, ".omp_data_arr");
10144 DECL_NAMELESS (ctx->sender_decl) = 1;
10145 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
10146 t = make_tree_vec (3);
10147 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
10148 TREE_VEC_ELT (t, 1)
10149 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
10150 ".omp_data_sizes");
10151 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
10152 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
10153 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
10154 TREE_VEC_ELT (t, 2)
10155 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
10156 map_cnt),
10157 ".omp_data_kinds");
10158 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
10159 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
10160 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
10161 gimple_omp_target_set_data_arg (stmt, t);
10163 vec<constructor_elt, va_gc> *vsize;
10164 vec<constructor_elt, va_gc> *vkind;
10165 vec_alloc (vsize, map_cnt);
10166 vec_alloc (vkind, map_cnt);
10167 unsigned int map_idx = 0;
10169 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10170 switch (OMP_CLAUSE_CODE (c))
10172 tree ovar, nc;
10174 default:
10175 break;
10176 case OMP_CLAUSE_MAP:
10177 case OMP_CLAUSE_TO:
10178 case OMP_CLAUSE_FROM:
10179 nc = c;
10180 ovar = OMP_CLAUSE_DECL (c);
10181 if (!DECL_P (ovar))
10183 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10184 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10186 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
10187 == get_base_address (ovar));
10188 nc = OMP_CLAUSE_CHAIN (c);
10189 ovar = OMP_CLAUSE_DECL (nc);
10191 else
10193 tree x = build_sender_ref (ovar, ctx);
10194 tree v
10195 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
10196 gimplify_assign (x, v, &ilist);
10197 nc = NULL_TREE;
10200 else
10202 if (DECL_SIZE (ovar)
10203 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
10205 tree ovar2 = DECL_VALUE_EXPR (ovar);
10206 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
10207 ovar2 = TREE_OPERAND (ovar2, 0);
10208 gcc_assert (DECL_P (ovar2));
10209 ovar = ovar2;
10211 if (!maybe_lookup_field (ovar, ctx))
10212 continue;
10215 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
10216 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
10217 talign = DECL_ALIGN_UNIT (ovar);
10218 if (nc)
10220 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
10221 tree x = build_sender_ref (ovar, ctx);
10222 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10223 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10224 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10225 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
10227 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10228 tree avar
10229 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
10230 mark_addressable (avar);
10231 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
10232 talign = DECL_ALIGN_UNIT (avar);
10233 avar = build_fold_addr_expr (avar);
10234 gimplify_assign (x, avar, &ilist);
10236 else if (is_gimple_reg (var))
10238 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10239 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
10240 mark_addressable (avar);
10241 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
10242 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
10243 gimplify_assign (avar, var, &ilist);
10244 avar = build_fold_addr_expr (avar);
10245 gimplify_assign (x, avar, &ilist);
10246 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
10247 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
10248 && !TYPE_READONLY (TREE_TYPE (var)))
10250 x = build_sender_ref (ovar, ctx);
10251 x = build_simple_mem_ref (x);
10252 gimplify_assign (var, x, &olist);
10255 else
10257 var = build_fold_addr_expr (var);
10258 gimplify_assign (x, var, &ilist);
10261 tree s = OMP_CLAUSE_SIZE (c);
10262 if (s == NULL_TREE)
10263 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
10264 s = fold_convert (size_type_node, s);
10265 tree purpose = size_int (map_idx++);
10266 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
10267 if (TREE_CODE (s) != INTEGER_CST)
10268 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
10270 unsigned char tkind = 0;
10271 switch (OMP_CLAUSE_CODE (c))
10273 case OMP_CLAUSE_MAP:
10274 tkind = OMP_CLAUSE_MAP_KIND (c);
10275 break;
10276 case OMP_CLAUSE_TO:
10277 tkind = OMP_CLAUSE_MAP_TO;
10278 break;
10279 case OMP_CLAUSE_FROM:
10280 tkind = OMP_CLAUSE_MAP_FROM;
10281 break;
10282 default:
10283 gcc_unreachable ();
10285 talign = ceil_log2 (talign);
10286 tkind |= talign << 3;
10287 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
10288 build_int_cst (unsigned_char_type_node,
10289 tkind));
10290 if (nc && nc != c)
10291 c = nc;
10294 gcc_assert (map_idx == map_cnt);
10296 DECL_INITIAL (TREE_VEC_ELT (t, 1))
10297 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
10298 DECL_INITIAL (TREE_VEC_ELT (t, 2))
10299 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
10300 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
10302 gimple_seq initlist = NULL;
10303 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
10304 TREE_VEC_ELT (t, 1)),
10305 &initlist, true, NULL_TREE);
10306 gimple_seq_add_seq (&ilist, initlist);
10308 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
10309 NULL);
10310 TREE_THIS_VOLATILE (clobber) = 1;
10311 gimple_seq_add_stmt (&olist,
10312 gimple_build_assign (TREE_VEC_ELT (t, 1),
10313 clobber));
10316 tree clobber = build_constructor (ctx->record_type, NULL);
10317 TREE_THIS_VOLATILE (clobber) = 1;
10318 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
10319 clobber));
10322 /* Once all the expansions are done, sequence all the different
10323 fragments inside gimple_omp_body. */
10325 new_body = NULL;
10327 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
10329 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10330 /* fixup_child_record_type might have changed receiver_decl's type. */
10331 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
10332 gimple_seq_add_stmt (&new_body,
10333 gimple_build_assign (ctx->receiver_decl, t));
10336 if (kind == GF_OMP_TARGET_KIND_REGION)
10338 gimple_seq_add_seq (&new_body, tgt_body);
10339 new_body = maybe_catch_exception (new_body);
10341 else if (kind == GF_OMP_TARGET_KIND_DATA)
10342 new_body = tgt_body;
10343 if (kind != GF_OMP_TARGET_KIND_UPDATE)
10345 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10346 gimple_omp_set_body (stmt, new_body);
10349 bind = gimple_build_bind (NULL, NULL,
10350 tgt_bind ? gimple_bind_block (tgt_bind)
10351 : NULL_TREE);
10352 gsi_replace (gsi_p, bind, true);
10353 gimple_bind_add_seq (bind, ilist);
10354 gimple_bind_add_stmt (bind, stmt);
10355 gimple_bind_add_seq (bind, olist);
10357 pop_gimplify_context (NULL);
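/* Illustration (a sketch, not from the original source): each mapped
   object gets one slot in three parallel arrays passed to the runtime:

       .omp_data_arr[i]   = address of (or pointer to) the host object
       .omp_data_sizes[i] = size of the mapped object in bytes
       .omp_data_kinds[i] = map kind | (ceil_log2 (alignment) << 3)

   .omp_data_sizes and .omp_data_kinds stay TREE_STATIC, with a
   DECL_INITIAL built from the constructors above, unless some size is
   not an INTEGER_CST.  */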
10360 /* Lower code for an OpenMP teams directive.  */
10362 static void
10363 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10365 gimple_omp_teams teams_stmt = as_a <gimple_omp_teams> (gsi_stmt (*gsi_p));
10366 push_gimplify_context ();
10368 tree block = make_node (BLOCK);
10369 gimple_bind bind = gimple_build_bind (NULL, NULL, block);
10370 gsi_replace (gsi_p, bind, true);
10371 gimple_seq bind_body = NULL;
10372 gimple_seq dlist = NULL;
10373 gimple_seq olist = NULL;
10375 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10376 OMP_CLAUSE_NUM_TEAMS);
10377 if (num_teams == NULL_TREE)
10378 num_teams = build_int_cst (unsigned_type_node, 0);
10379 else
10381 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
10382 num_teams = fold_convert (unsigned_type_node, num_teams);
10383 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
10385 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10386 OMP_CLAUSE_THREAD_LIMIT);
10387 if (thread_limit == NULL_TREE)
10388 thread_limit = build_int_cst (unsigned_type_node, 0);
10389 else
10391 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
10392 thread_limit = fold_convert (unsigned_type_node, thread_limit);
10393 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
10394 fb_rvalue);
10397 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
10398 &bind_body, &dlist, ctx, NULL);
10399 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
10400 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
10401 gimple_seq_add_stmt (&bind_body, teams_stmt);
10403 location_t loc = gimple_location (teams_stmt);
10404 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
10405 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
10406 gimple_set_location (call, loc);
10407 gimple_seq_add_stmt (&bind_body, call);
10409 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
10410 gimple_omp_set_body (teams_stmt, NULL);
10411 gimple_seq_add_seq (&bind_body, olist);
10412 gimple_seq_add_seq (&bind_body, dlist);
10413 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
10414 gimple_bind_set_body (bind, bind_body);
10416 pop_gimplify_context (bind);
10418 gimple_bind_append_vars (bind, ctx->block_vars);
10419 BLOCK_VARS (block) = ctx->block_vars;
10420 if (BLOCK_VARS (block))
10421 TREE_USED (block) = 1;
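/* A sketch (not in the original source) of the lowering above:

       #pragma omp teams num_teams(N) thread_limit(M)   ==>
           GOMP_teams (N, M);     (0 is passed for an absent clause)
           <lowered body>
           GIMPLE_OMP_RETURN (nowait)

   preceded by the rec-input clause sequence and followed by the
   reduction and dlist sequences.  */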
10425 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
10426 regimplified. If DATA is non-NULL, lower_omp_1 is outside
10427 of an OpenMP context, but with task_shared_vars set.  */
10429 static tree
10430 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
10431 void *data)
10433 tree t = *tp;
10435 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
10436 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
10437 return t;
10439 if (task_shared_vars
10440 && DECL_P (t)
10441 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
10442 return t;
10444 /* If a global variable has been privatized, TREE_CONSTANT on
10445 ADDR_EXPR might be wrong. */
10446 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
10447 recompute_tree_invariant_for_addr_expr (t);
10449 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
10450 return NULL_TREE;
10453 static void
10454 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10456 gimple stmt = gsi_stmt (*gsi_p);
10457 struct walk_stmt_info wi;
10458 gimple_call call_stmt;
10460 if (gimple_has_location (stmt))
10461 input_location = gimple_location (stmt);
10463 if (task_shared_vars)
10464 memset (&wi, '\0', sizeof (wi));
10466 /* If we have issued syntax errors, avoid doing any heavy lifting.
10467 Just replace the OpenMP directives with a NOP to avoid
10468 confusing RTL expansion. */
10469 if (seen_error () && is_gimple_omp (stmt))
10471 gsi_replace (gsi_p, gimple_build_nop (), true);
10472 return;
10475 switch (gimple_code (stmt))
10477 case GIMPLE_COND:
10479 gimple_cond cond_stmt = as_a <gimple_cond> (stmt);
10480 if ((ctx || task_shared_vars)
10481 && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
10482 lower_omp_regimplify_p,
10483 ctx ? NULL : &wi, NULL)
10484 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
10485 lower_omp_regimplify_p,
10486 ctx ? NULL : &wi, NULL)))
10487 gimple_regimplify_operands (cond_stmt, gsi_p);
10489 break;
10490 case GIMPLE_CATCH:
10491 lower_omp (gimple_catch_handler_ptr (as_a <gimple_catch> (stmt)), ctx);
10492 break;
10493 case GIMPLE_EH_FILTER:
10494 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
10495 break;
10496 case GIMPLE_TRY:
10497 lower_omp (gimple_try_eval_ptr (stmt), ctx);
10498 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
10499 break;
10500 case GIMPLE_TRANSACTION:
10501 lower_omp (gimple_transaction_body_ptr (
10502 as_a <gimple_transaction> (stmt)),
10503 ctx);
10504 break;
10505 case GIMPLE_BIND:
10506 lower_omp (gimple_bind_body_ptr (as_a <gimple_bind> (stmt)), ctx);
10507 break;
10508 case GIMPLE_OMP_PARALLEL:
10509 case GIMPLE_OMP_TASK:
10510 ctx = maybe_lookup_ctx (stmt);
10511 gcc_assert (ctx);
10512 if (ctx->cancellable)
10513 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10514 lower_omp_taskreg (gsi_p, ctx);
10515 break;
10516 case GIMPLE_OMP_FOR:
10517 ctx = maybe_lookup_ctx (stmt);
10518 gcc_assert (ctx);
10519 if (ctx->cancellable)
10520 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10521 lower_omp_for (gsi_p, ctx);
10522 break;
10523 case GIMPLE_OMP_SECTIONS:
10524 ctx = maybe_lookup_ctx (stmt);
10525 gcc_assert (ctx);
10526 if (ctx->cancellable)
10527 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10528 lower_omp_sections (gsi_p, ctx);
10529 break;
10530 case GIMPLE_OMP_SINGLE:
10531 ctx = maybe_lookup_ctx (stmt);
10532 gcc_assert (ctx);
10533 lower_omp_single (gsi_p, ctx);
10534 break;
10535 case GIMPLE_OMP_MASTER:
10536 ctx = maybe_lookup_ctx (stmt);
10537 gcc_assert (ctx);
10538 lower_omp_master (gsi_p, ctx);
10539 break;
10540 case GIMPLE_OMP_TASKGROUP:
10541 ctx = maybe_lookup_ctx (stmt);
10542 gcc_assert (ctx);
10543 lower_omp_taskgroup (gsi_p, ctx);
10544 break;
10545 case GIMPLE_OMP_ORDERED:
10546 ctx = maybe_lookup_ctx (stmt);
10547 gcc_assert (ctx);
10548 lower_omp_ordered (gsi_p, ctx);
10549 break;
10550 case GIMPLE_OMP_CRITICAL:
10551 ctx = maybe_lookup_ctx (stmt);
10552 gcc_assert (ctx);
10553 lower_omp_critical (gsi_p, ctx);
10554 break;
10555 case GIMPLE_OMP_ATOMIC_LOAD:
10556 if ((ctx || task_shared_vars)
10557 && walk_tree (gimple_omp_atomic_load_rhs_ptr (
10558 as_a <gimple_omp_atomic_load> (stmt)),
10559 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
10560 gimple_regimplify_operands (stmt, gsi_p);
10561 break;
10562 case GIMPLE_OMP_TARGET:
10563 ctx = maybe_lookup_ctx (stmt);
10564 gcc_assert (ctx);
10565 lower_omp_target (gsi_p, ctx);
10566 break;
10567 case GIMPLE_OMP_TEAMS:
10568 ctx = maybe_lookup_ctx (stmt);
10569 gcc_assert (ctx);
10570 lower_omp_teams (gsi_p, ctx);
10571 break;
10572 case GIMPLE_CALL:
10573 tree fndecl;
10574 call_stmt = as_a <gimple_call> (stmt);
10575 fndecl = gimple_call_fndecl (call_stmt);
10576 if (fndecl
10577 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10578 switch (DECL_FUNCTION_CODE (fndecl))
10580 case BUILT_IN_GOMP_BARRIER:
10581 if (ctx == NULL)
10582 break;
10583 /* FALLTHRU */
10584 case BUILT_IN_GOMP_CANCEL:
10585 case BUILT_IN_GOMP_CANCELLATION_POINT:
10586 omp_context *cctx;
10587 cctx = ctx;
10588 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10589 cctx = cctx->outer;
10590 gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
10591 if (!cctx->cancellable)
10593 if (DECL_FUNCTION_CODE (fndecl)
10594 == BUILT_IN_GOMP_CANCELLATION_POINT)
10596 stmt = gimple_build_nop ();
10597 gsi_replace (gsi_p, stmt, false);
10599 break;
10601 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10603 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10604 gimple_call_set_fndecl (call_stmt, fndecl);
10605 gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
10607 tree lhs;
10608 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)), NULL);
10609 gimple_call_set_lhs (call_stmt, lhs);
10610 tree fallthru_label;
10611 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10612 gimple g;
10613 g = gimple_build_label (fallthru_label);
10614 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10615 g = gimple_build_cond (NE_EXPR, lhs,
10616 fold_convert (TREE_TYPE (lhs),
10617 boolean_false_node),
10618 cctx->cancel_label, fallthru_label);
10619 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10620 break;
10621 default:
10622 break;
10624 /* FALLTHRU */
10625 default:
10626 if ((ctx || task_shared_vars)
10627 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10628 ctx ? NULL : &wi))
10630 /* Just remove clobbers; this should happen only if we have
10631 "privatized" local addressable variables in SIMD regions.
10632 The clobber isn't needed in that case, and gimplifying the address
10633 of the ARRAY_REF into a pointer and creating a MEM_REF based
10634 clobber would create worse code than we get with the clobber
10635 dropped.  */
10636 if (gimple_clobber_p (stmt))
10638 gsi_replace (gsi_p, gimple_build_nop (), true);
10639 break;
10641 gimple_regimplify_operands (stmt, gsi_p);
10643 break;
10647 static void
10648 lower_omp (gimple_seq *body, omp_context *ctx)
10650 location_t saved_location = input_location;
10651 gimple_stmt_iterator gsi;
10652 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10653 lower_omp_1 (&gsi, ctx);
10654 /* During gimplification, we have not always invoked fold_stmt
10655 (gimplify.c:maybe_fold_stmt); call it now. */
10656 if (target_nesting_level)
10657 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10658 fold_stmt (&gsi);
10659 input_location = saved_location;
10662 /* Main entry point. */
10664 static unsigned int
10665 execute_lower_omp (void)
10667 gimple_seq body;
10668 int i;
10669 omp_context *ctx;
10671 /* This pass always runs, to provide PROP_gimple_lomp.
10672 But there is nothing to do unless -fopenmp, -fopenmp-simd or -fcilkplus is given.  */
10673 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10674 return 0;
10676 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10677 delete_omp_context);
10679 body = gimple_body (current_function_decl);
10680 scan_omp (&body, NULL);
10681 gcc_assert (taskreg_nesting_level == 0);
10682 FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
10683 finish_taskreg_scan (ctx);
10684 taskreg_contexts.release ();
10686 if (all_contexts->root)
10688 if (task_shared_vars)
10689 push_gimplify_context ();
10690 lower_omp (&body, NULL);
10691 if (task_shared_vars)
10692 pop_gimplify_context (NULL);
10695 if (all_contexts)
10697 splay_tree_delete (all_contexts);
10698 all_contexts = NULL;
10700 BITMAP_FREE (task_shared_vars);
10701 return 0;
10704 namespace {
10706 const pass_data pass_data_lower_omp =
10708 GIMPLE_PASS, /* type */
10709 "omplower", /* name */
10710 OPTGROUP_NONE, /* optinfo_flags */
10711 TV_NONE, /* tv_id */
10712 PROP_gimple_any, /* properties_required */
10713 PROP_gimple_lomp, /* properties_provided */
10714 0, /* properties_destroyed */
10715 0, /* todo_flags_start */
10716 0, /* todo_flags_finish */
10719 class pass_lower_omp : public gimple_opt_pass
10721 public:
10722 pass_lower_omp (gcc::context *ctxt)
10723 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10726 /* opt_pass methods: */
10727 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10729 }; // class pass_lower_omp
10731 } // anon namespace
10733 gimple_opt_pass *
10734 make_pass_lower_omp (gcc::context *ctxt)
10736 return new pass_lower_omp (ctxt);
10739 /* The following is a utility to diagnose OpenMP structured block violations.
10740 It is not part of the "omplower" pass, as that's invoked too late. It
10741 should be invoked by the respective front ends after gimplification. */
10743 static splay_tree all_labels;
10745 /* Check for mismatched contexts and generate an error if needed. Return
10746 true if an error is detected. */
10748 static bool
10749 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10750 gimple branch_ctx, gimple label_ctx)
10752 if (label_ctx == branch_ctx)
10753 return false;
10757 /* Previously we kept track of the label's entire context in diagnose_sb_[12]
10758 so we could traverse it and issue a correct "exit" or "enter" error
10759 message upon a structured block violation.
10761 We built the context by building a list with tree_cons'ing, but there is
10762 no easy counterpart in gimple tuples. It seems like far too much work
10763 for issuing exit/enter error messages. If someone really misses the
10764 distinct error message... patches welcome.  */
10767 #if 0
10768 /* Try to avoid confusing the user by producing an error message
10769 with correct "exit" or "enter" verbiage. We prefer "exit"
10770 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10771 if (branch_ctx == NULL)
10772 exit_p = false;
10773 else
10775 while (label_ctx)
10777 if (TREE_VALUE (label_ctx) == branch_ctx)
10779 exit_p = false;
10780 break;
10782 label_ctx = TREE_CHAIN (label_ctx);
10786 if (exit_p)
10787 error ("invalid exit from OpenMP structured block");
10788 else
10789 error ("invalid entry to OpenMP structured block");
10790 #endif
10792 bool cilkplus_block = false;
10793 if (flag_cilkplus)
10795 if ((branch_ctx
10796 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10797 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10798 || (label_ctx
10799 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10800 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10801 cilkplus_block = true;
10804 /* If it's obvious we have an invalid entry, be specific about the error. */
10805 if (branch_ctx == NULL)
10807 if (cilkplus_block)
10808 error ("invalid entry to Cilk Plus structured block");
10809 else
10810 error ("invalid entry to OpenMP structured block");
10812 else
10814 /* Otherwise, be vague and lazy, but efficient. */
10815 if (cilkplus_block)
10816 error ("invalid branch to/from a Cilk Plus structured block");
10817 else
10818 error ("invalid branch to/from an OpenMP structured block");
10821 gsi_replace (gsi_p, gimple_build_nop (), false);
10822 return true;
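/* Example of a diagnosed violation (hypothetical user code):

       #pragma omp parallel
       {
         if (cond) goto bad;     branch_ctx = the parallel
         ...
       }
       bad: ...                  label_ctx = NULL

   The two contexts differ, so the "invalid branch to/from an OpenMP
   structured block" error is issued and the branch is replaced by a
   GIMPLE_NOP.  */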
10825 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10826 where each label is found. */
10828 static tree
10829 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10830 struct walk_stmt_info *wi)
10832 gimple context = (gimple) wi->info;
10833 gimple inner_context;
10834 gimple stmt = gsi_stmt (*gsi_p);
10836 *handled_ops_p = true;
10838 switch (gimple_code (stmt))
10840 WALK_SUBSTMTS;
10842 case GIMPLE_OMP_PARALLEL:
10843 case GIMPLE_OMP_TASK:
10844 case GIMPLE_OMP_SECTIONS:
10845 case GIMPLE_OMP_SINGLE:
10846 case GIMPLE_OMP_SECTION:
10847 case GIMPLE_OMP_MASTER:
10848 case GIMPLE_OMP_ORDERED:
10849 case GIMPLE_OMP_CRITICAL:
10850 case GIMPLE_OMP_TARGET:
10851 case GIMPLE_OMP_TEAMS:
10852 case GIMPLE_OMP_TASKGROUP:
10853 /* The minimal context here is just the current OMP construct. */
10854 inner_context = stmt;
10855 wi->info = inner_context;
10856 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10857 wi->info = context;
10858 break;
10860 case GIMPLE_OMP_FOR:
10861 inner_context = stmt;
10862 wi->info = inner_context;
10863 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10864 walk them. */
10865 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10866 diagnose_sb_1, NULL, wi);
10867 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10868 wi->info = context;
10869 break;
10871 case GIMPLE_LABEL:
10872 splay_tree_insert (all_labels,
10873 (splay_tree_key) gimple_label_label (
10874 as_a <gimple_label> (stmt)),
10875 (splay_tree_value) context);
10876 break;
10878 default:
10879 break;
10882 return NULL_TREE;
10885 /* Pass 2: Check each branch and see if its context differs from that of
10886 the destination label's context. */
10888 static tree
10889 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10890 struct walk_stmt_info *wi)
10892 gimple context = (gimple) wi->info;
10893 splay_tree_node n;
10894 gimple stmt = gsi_stmt (*gsi_p);
10896 *handled_ops_p = true;
10898 switch (gimple_code (stmt))
10900 WALK_SUBSTMTS;
10902 case GIMPLE_OMP_PARALLEL:
10903 case GIMPLE_OMP_TASK:
10904 case GIMPLE_OMP_SECTIONS:
10905 case GIMPLE_OMP_SINGLE:
10906 case GIMPLE_OMP_SECTION:
10907 case GIMPLE_OMP_MASTER:
10908 case GIMPLE_OMP_ORDERED:
10909 case GIMPLE_OMP_CRITICAL:
10910 case GIMPLE_OMP_TARGET:
10911 case GIMPLE_OMP_TEAMS:
10912 case GIMPLE_OMP_TASKGROUP:
10913 wi->info = stmt;
10914 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10915 wi->info = context;
10916 break;
10918 case GIMPLE_OMP_FOR:
10919 wi->info = stmt;
10920 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10921 walk them. */
10922 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10923 diagnose_sb_2, NULL, wi);
10924 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10925 wi->info = context;
10926 break;
10928 case GIMPLE_COND:
10930 gimple_cond cond_stmt = as_a <gimple_cond> (stmt);
10931 tree lab = gimple_cond_true_label (cond_stmt);
10932 if (lab)
10934 n = splay_tree_lookup (all_labels,
10935 (splay_tree_key) lab);
10936 diagnose_sb_0 (gsi_p, context,
10937 n ? (gimple) n->value : NULL);
10939 lab = gimple_cond_false_label (cond_stmt);
10940 if (lab)
10942 n = splay_tree_lookup (all_labels,
10943 (splay_tree_key) lab);
10944 diagnose_sb_0 (gsi_p, context,
10945 n ? (gimple) n->value : NULL);
10948 break;
10950 case GIMPLE_GOTO:
10952 tree lab = gimple_goto_dest (stmt);
10953 if (TREE_CODE (lab) != LABEL_DECL)
10954 break;
10956 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10957 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10959 break;
10961 case GIMPLE_SWITCH:
10963 gimple_switch switch_stmt = as_a <gimple_switch> (stmt);
10964 unsigned int i;
10965 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
10967 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
10968 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10969 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10970 break;
10973 break;
10975 case GIMPLE_RETURN:
10976 diagnose_sb_0 (gsi_p, context, NULL);
10977 break;
10979 default:
10980 break;
10983 return NULL_TREE;
10986 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10987 codes. */
10988 bool
10989 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
10990 int *region_idx)
10992 gimple last = last_stmt (bb);
10993 enum gimple_code code = gimple_code (last);
10994 struct omp_region *cur_region = *region;
10995 bool fallthru = false;
10997 switch (code)
10999 case GIMPLE_OMP_PARALLEL:
11000 case GIMPLE_OMP_TASK:
11001 case GIMPLE_OMP_FOR:
11002 case GIMPLE_OMP_SINGLE:
11003 case GIMPLE_OMP_TEAMS:
11004 case GIMPLE_OMP_MASTER:
11005 case GIMPLE_OMP_TASKGROUP:
11006 case GIMPLE_OMP_ORDERED:
11007 case GIMPLE_OMP_CRITICAL:
11008 case GIMPLE_OMP_SECTION:
11009 cur_region = new_omp_region (bb, code, cur_region);
11010 fallthru = true;
11011 break;
11013 case GIMPLE_OMP_TARGET:
11014 cur_region = new_omp_region (bb, code, cur_region);
11015 fallthru = true;
11016 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
11017 cur_region = cur_region->outer;
11018 break;
11020 case GIMPLE_OMP_SECTIONS:
11021 cur_region = new_omp_region (bb, code, cur_region);
11022 fallthru = true;
11023 break;
11025 case GIMPLE_OMP_SECTIONS_SWITCH:
11026 fallthru = false;
11027 break;
11029 case GIMPLE_OMP_ATOMIC_LOAD:
11030 case GIMPLE_OMP_ATOMIC_STORE:
11031 fallthru = true;
11032 break;
11034 case GIMPLE_OMP_RETURN:
11035 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
11036 somewhere other than the next block. This will be
11037 created later. */
11038 cur_region->exit = bb;
11039 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
11040 cur_region = cur_region->outer;
11041 break;
11043 case GIMPLE_OMP_CONTINUE:
11044 cur_region->cont = bb;
11045 switch (cur_region->type)
11047 case GIMPLE_OMP_FOR:
11048 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
11049 successor edges as abnormal to prevent splitting
11050 them. */
11051 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
11052 /* Make the loopback edge. */
11053 make_edge (bb, single_succ (cur_region->entry),
11054 EDGE_ABNORMAL);
11056 /* Create an edge from GIMPLE_OMP_FOR to exit, which
11057 corresponds to the case that the body of the loop
11058 is not executed at all. */
11059 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
11060 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
11061 fallthru = false;
11062 break;
11064 case GIMPLE_OMP_SECTIONS:
11065 /* Wire up the edges into and out of the nested sections. */
11067 basic_block switch_bb = single_succ (cur_region->entry);
11069 struct omp_region *i;
11070 for (i = cur_region->inner; i ; i = i->next)
11072 gcc_assert (i->type == GIMPLE_OMP_SECTION);
11073 make_edge (switch_bb, i->entry, 0);
11074 make_edge (i->exit, bb, EDGE_FALLTHRU);
11077 /* Make the loopback edge to the block with
11078 GIMPLE_OMP_SECTIONS_SWITCH. */
11079 make_edge (bb, switch_bb, 0);
11081 /* Make the edge from the switch to exit. */
11082 make_edge (switch_bb, bb->next_bb, 0);
11083 fallthru = false;
11085 break;
11087 default:
11088 gcc_unreachable ();
11090 break;
11092 default:
11093 gcc_unreachable ();
11096 if (*region != cur_region)
11098 *region = cur_region;
11099 if (cur_region)
11100 *region_idx = cur_region->entry->index;
11101 else
11102 *region_idx = 0;
11105 return fallthru;
11108 static unsigned int
11109 diagnose_omp_structured_block_errors (void)
11111 struct walk_stmt_info wi;
11112 gimple_seq body = gimple_body (current_function_decl);
11114 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
11116 memset (&wi, 0, sizeof (wi));
11117 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
11119 memset (&wi, 0, sizeof (wi));
11120 wi.want_locations = true;
11121 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
11123 gimple_set_body (current_function_decl, body);
11125 splay_tree_delete (all_labels);
11126 all_labels = NULL;
11128 return 0;
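/* For illustration (hypothetical testcase): the two walks above are
   what reject control transfers crossing an OpenMP structured block
   boundary, e.g.

     goto l;
     #pragma omp parallel
     { l:; }

   The first walk (diagnose_sb_1) records every label together with the
   OMP context it appears in; the second (diagnose_sb_2) then checks
   each branch against that information.  */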
11131 namespace {
11133 const pass_data pass_data_diagnose_omp_blocks =
11135 GIMPLE_PASS, /* type */
11136 "*diagnose_omp_blocks", /* name */
11137 OPTGROUP_NONE, /* optinfo_flags */
11138 TV_NONE, /* tv_id */
11139 PROP_gimple_any, /* properties_required */
11140 0, /* properties_provided */
11141 0, /* properties_destroyed */
11142 0, /* todo_flags_start */
11143 0, /* todo_flags_finish */
11146 class pass_diagnose_omp_blocks : public gimple_opt_pass
11148 public:
11149 pass_diagnose_omp_blocks (gcc::context *ctxt)
11150 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
11153 /* opt_pass methods: */
11154 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
11155 virtual unsigned int execute (function *)
11157 return diagnose_omp_structured_block_errors ();
11160 }; // class pass_diagnose_omp_blocks
11162 } // anon namespace
11164 gimple_opt_pass *
11165 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
11167 return new pass_diagnose_omp_blocks (ctxt);
11170 /* SIMD clone supporting code. */
11172 /* Allocate a fresh `struct cgraph_simd_clone' and return it. NARGS is the number
11173 of arguments to reserve space for. */
11175 static struct cgraph_simd_clone *
11176 simd_clone_struct_alloc (int nargs)
11178 struct cgraph_simd_clone *clone_info;
11179 size_t len = (sizeof (struct cgraph_simd_clone)
11180 + nargs * sizeof (struct cgraph_simd_clone_arg));
11181 clone_info = (struct cgraph_simd_clone *)
11182 ggc_internal_cleared_alloc (len);
11183 return clone_info;
11186 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
11188 static inline void
11189 simd_clone_struct_copy (struct cgraph_simd_clone *to,
11190 struct cgraph_simd_clone *from)
11192 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
11193 + ((from->nargs - from->inbranch)
11194 * sizeof (struct cgraph_simd_clone_arg))));
11197 /* Return a vector of the parameter types of function FNDECL. This uses
11198 TYPE_ARG_TYPES if available, otherwise falls back to the types of
11199 the DECL_ARGUMENTS. */
11201 vec<tree>
11202 simd_clone_vector_of_formal_parm_types (tree fndecl)
11204 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
11205 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
11206 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
11207 unsigned int i;
11208 tree arg;
11209 FOR_EACH_VEC_ELT (args, i, arg)
11210 args[i] = TREE_TYPE (args[i]);
11211 return args;
11214 /* Given a simd function in NODE, extract the simd specific
11215 information from the OMP clauses passed in CLAUSES, and return
11216 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
11217 is set to TRUE if the `inbranch' or `notinbranch' clause is specified,
11218 otherwise set to FALSE. */
11220 static struct cgraph_simd_clone *
11221 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
11222 bool *inbranch_specified)
11224 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
11225 tree t;
11226 int n;
11227 *inbranch_specified = false;
11229 n = args.length ();
11230 if (n > 0 && args.last () == void_type_node)
11231 n--;
11233 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
11234 be cloned carry a distinctive artificial "cilk simd function"
11235 attribute in addition to "omp declare simd". */
11236 bool cilk_clone
11237 = (flag_cilkplus
11238 && lookup_attribute ("cilk simd function",
11239 DECL_ATTRIBUTES (node->decl)));
11241 /* Allocate one more than needed just in case this is an in-branch
11242 clone which will require a mask argument. */
11243 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
11244 clone_info->nargs = n;
11245 clone_info->cilk_elemental = cilk_clone;
11247 if (!clauses)
11249 args.release ();
11250 return clone_info;
11252 clauses = TREE_VALUE (clauses);
11253 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
11254 return clone_info;
11256 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
11258 switch (OMP_CLAUSE_CODE (t))
11260 case OMP_CLAUSE_INBRANCH:
11261 clone_info->inbranch = 1;
11262 *inbranch_specified = true;
11263 break;
11264 case OMP_CLAUSE_NOTINBRANCH:
11265 clone_info->inbranch = 0;
11266 *inbranch_specified = true;
11267 break;
11268 case OMP_CLAUSE_SIMDLEN:
11269 clone_info->simdlen
11270 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
11271 break;
11272 case OMP_CLAUSE_LINEAR:
11274 tree decl = OMP_CLAUSE_DECL (t);
11275 tree step = OMP_CLAUSE_LINEAR_STEP (t);
11276 int argno = TREE_INT_CST_LOW (decl);
11277 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
11279 clone_info->args[argno].arg_type
11280 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
11281 clone_info->args[argno].linear_step = tree_to_shwi (step);
11282 gcc_assert (clone_info->args[argno].linear_step >= 0
11283 && clone_info->args[argno].linear_step < n);
11285 else
11287 if (POINTER_TYPE_P (args[argno]))
11288 step = fold_convert (ssizetype, step);
11289 if (!tree_fits_shwi_p (step))
11291 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11292 "ignoring large linear step");
11293 args.release ();
11294 return NULL;
11296 else if (integer_zerop (step))
11298 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11299 "ignoring zero linear step");
11300 args.release ();
11301 return NULL;
11303 else
11305 clone_info->args[argno].arg_type
11306 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
11307 clone_info->args[argno].linear_step = tree_to_shwi (step);
11310 break;
11312 case OMP_CLAUSE_UNIFORM:
11314 tree decl = OMP_CLAUSE_DECL (t);
11315 int argno = tree_to_uhwi (decl);
11316 clone_info->args[argno].arg_type
11317 = SIMD_CLONE_ARG_TYPE_UNIFORM;
11318 break;
11320 case OMP_CLAUSE_ALIGNED:
11322 tree decl = OMP_CLAUSE_DECL (t);
11323 int argno = tree_to_uhwi (decl);
11324 clone_info->args[argno].alignment
11325 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
11326 break;
11328 default:
11329 break;
11332 args.release ();
11333 return clone_info;
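/* A worked example (hypothetical declaration, not from any testcase):
   given

     #pragma omp declare simd uniform (b) linear (c : 4) notinbranch
     int foo (int a, int b, int c);

   the loop above leaves args[0] as the default SIMD_CLONE_ARG_TYPE_VECTOR,
   sets args[1].arg_type to SIMD_CLONE_ARG_TYPE_UNIFORM, sets
   args[2].arg_type to SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with
   linear_step 4, clears clone_info->inbranch and sets
   *INBRANCH_SPECIFIED.  */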
11336 /* Given a SIMD clone in NODE, calculate the characteristic data
11337 type and return the corresponding type. The characteristic data
11338 type is computed as described in the Intel Vector ABI. */
11340 static tree
11341 simd_clone_compute_base_data_type (struct cgraph_node *node,
11342 struct cgraph_simd_clone *clone_info)
11344 tree type = integer_type_node;
11345 tree fndecl = node->decl;
11347 /* a) For non-void function, the characteristic data type is the
11348 return type. */
11349 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
11350 type = TREE_TYPE (TREE_TYPE (fndecl));
11352 /* b) If the function has any non-uniform, non-linear parameters,
11353 then the characteristic data type is the type of the first
11354 such parameter. */
11355 else
11357 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
11358 for (unsigned int i = 0; i < clone_info->nargs; ++i)
11359 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
11361 type = map[i];
11362 break;
11364 map.release ();
11367 /* c) If the characteristic data type determined by a) or b) above
11368 is a struct, union, or class type which is passed by value (except
11369 for the type that maps to the built-in complex data type), the
11370 characteristic data type is int. */
11371 if (RECORD_OR_UNION_TYPE_P (type)
11372 && !aggregate_value_p (type, NULL)
11373 && TREE_CODE (type) != COMPLEX_TYPE)
11374 return integer_type_node;
11376 /* d) If none of the above three classes is applicable, the
11377 characteristic data type is int. */
11379 return type;
11381 /* e) For Intel Xeon Phi native and offload compilation, if the
11382 resulting characteristic data type is 8-bit or 16-bit integer
11383 data type, the characteristic data type is int. */
11384 /* Well, we don't handle Xeon Phi yet. */
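/* For illustration (hypothetical declarations): under rule a),

     #pragma omp declare simd uniform (p)
     double bar (double *p, int i);

   has characteristic data type double, its return type; were the
   return type void, rule b) would instead pick int, the type of I,
   the first non-uniform, non-linear parameter.  */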
11387 static tree
11388 simd_clone_mangle (struct cgraph_node *node,
11389 struct cgraph_simd_clone *clone_info)
11391 char vecsize_mangle = clone_info->vecsize_mangle;
11392 char mask = clone_info->inbranch ? 'M' : 'N';
11393 unsigned int simdlen = clone_info->simdlen;
11394 unsigned int n;
11395 pretty_printer pp;
11397 gcc_assert (vecsize_mangle && simdlen);
11399 pp_string (&pp, "_ZGV");
11400 pp_character (&pp, vecsize_mangle);
11401 pp_character (&pp, mask);
11402 pp_decimal_int (&pp, simdlen);
11404 for (n = 0; n < clone_info->nargs; ++n)
11406 struct cgraph_simd_clone_arg arg = clone_info->args[n];
11408 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
11409 pp_character (&pp, 'u');
11410 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
11412 gcc_assert (arg.linear_step != 0);
11413 pp_character (&pp, 'l');
11414 if (arg.linear_step > 1)
11415 pp_unsigned_wide_integer (&pp, arg.linear_step);
11416 else if (arg.linear_step < 0)
11418 pp_character (&pp, 'n');
11419 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
11420 arg.linear_step));
11423 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
11425 pp_character (&pp, 's');
11426 pp_unsigned_wide_integer (&pp, arg.linear_step);
11428 else
11429 pp_character (&pp, 'v');
11430 if (arg.alignment)
11432 pp_character (&pp, 'a');
11433 pp_decimal_int (&pp, arg.alignment);
11437 pp_underscore (&pp);
11438 pp_string (&pp,
11439 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
11440 const char *str = pp_formatted_text (&pp);
11442 /* If there already is a SIMD clone with the same mangled name, don't
11443 add another one. This can happen e.g. for
11444 #pragma omp declare simd
11445 #pragma omp declare simd simdlen(8)
11446 int foo (int, int);
11447 if the simdlen is assumed to be 8 for the first one, etc. */
11448 for (struct cgraph_node *clone = node->simd_clones; clone;
11449 clone = clone->simdclone->next_clone)
11450 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
11451 str) == 0)
11452 return NULL_TREE;
11454 return get_identifier (str);
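/* A sketch of the result (the ISA letter comes from the target's
   vecsize_mangle; 'b' below is only an example): with simdlen 4, no
   mask, and arguments <vector, uniform, linear with step 4>, a
   function foo is mangled as

     _ZGVbN4vul4_foo

   i.e. "_ZGV", the ISA letter, 'N' for unmasked, the simdlen, one
   letter (plus optional step or alignment) per argument, '_', and the
   assembler name of the original function.  */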
11457 /* Create a simd clone of OLD_NODE and return it. */
11459 static struct cgraph_node *
11460 simd_clone_create (struct cgraph_node *old_node)
11462 struct cgraph_node *new_node;
11463 if (old_node->definition)
11465 if (!old_node->has_gimple_body_p ())
11466 return NULL;
11467 old_node->get_body ();
11468 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
11469 false, NULL, NULL,
11470 "simdclone");
11472 else
11474 tree old_decl = old_node->decl;
11475 tree new_decl = copy_node (old_node->decl);
11476 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
11477 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
11478 SET_DECL_RTL (new_decl, NULL);
11479 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
11480 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
11481 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
11482 symtab->call_cgraph_insertion_hooks (new_node);
11484 if (new_node == NULL)
11485 return new_node;
11487 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
11489 /* The function cgraph_function_versioning () will force the new
11490 symbol local. Undo this, and inherit external visibility from
11491 the old node. */
11492 new_node->local.local = old_node->local.local;
11493 new_node->externally_visible = old_node->externally_visible;
11495 return new_node;
11498 /* Adjust the return type of the given function to its appropriate
11499 vector counterpart. Returns a simd array to be used throughout the
11500 function as a return value. */
11502 static tree
11503 simd_clone_adjust_return_type (struct cgraph_node *node)
11505 tree fndecl = node->decl;
11506 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
11507 unsigned int veclen;
11508 tree t;
11510 /* Adjust the function return type. */
11511 if (orig_rettype == void_type_node)
11512 return NULL_TREE;
11513 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
11514 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
11515 || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
11516 veclen = node->simdclone->vecsize_int;
11517 else
11518 veclen = node->simdclone->vecsize_float;
11519 veclen /= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))));
11520 if (veclen > node->simdclone->simdlen)
11521 veclen = node->simdclone->simdlen;
11522 if (veclen == node->simdclone->simdlen)
11523 TREE_TYPE (TREE_TYPE (fndecl))
11524 = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)),
11525 node->simdclone->simdlen);
11526 else
11528 t = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)), veclen);
11529 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
11530 TREE_TYPE (TREE_TYPE (fndecl)) = t;
11532 if (!node->definition)
11533 return NULL_TREE;
11535 t = DECL_RESULT (fndecl);
11536 /* Adjust the DECL_RESULT. */
11537 gcc_assert (TREE_TYPE (t) != void_type_node);
11538 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
11539 relayout_decl (t);
11541 tree atype = build_array_type_nelts (orig_rettype,
11542 node->simdclone->simdlen);
11543 if (veclen != node->simdclone->simdlen)
11544 return build1 (VIEW_CONVERT_EXPR, atype, t);
11546 /* Set up a SIMD array to use as the return value. */
11547 tree retval = create_tmp_var_raw (atype, "retval");
11548 gimple_add_tmp_var (retval);
11549 return retval;
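/* Example of the adjustment (assuming a target where veclen works out
   to 4 ints per vector): for "int foo (...)" with simdlen 4 the return
   type becomes a V4SI vector and DECL_RESULT is relaid out to match;
   with simdlen 8 it becomes a two-element array of V4SI, and the
   VIEW_CONVERT_EXPR built above presents DECL_RESULT as an int[8]
   array for the rewritten body to store into.  */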
11552 /* Each vector argument has a corresponding array to be used locally
11553 as part of the eventual loop. Create such a temporary array and
11554 return it.
11556 PREFIX is the prefix to be used for the temporary.
11558 TYPE is the inner element type.
11560 SIMDLEN is the number of elements. */
11562 static tree
11563 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
11565 tree atype = build_array_type_nelts (type, simdlen);
11566 tree avar = create_tmp_var_raw (atype, prefix);
11567 gimple_add_tmp_var (avar);
11568 return avar;
11571 /* Modify the function argument types to their corresponding vector
11572 counterparts if appropriate. Also, create one array for each simd
11573 argument to be used locally when using the function arguments as
11574 part of the loop.
11576 NODE is the function whose arguments are to be adjusted.
11578 Returns an adjustment vector describing how the argument types
11579 were adjusted. */
11581 static ipa_parm_adjustment_vec
11582 simd_clone_adjust_argument_types (struct cgraph_node *node)
11584 vec<tree> args;
11585 ipa_parm_adjustment_vec adjustments;
11587 if (node->definition)
11588 args = ipa_get_vector_of_formal_parms (node->decl);
11589 else
11590 args = simd_clone_vector_of_formal_parm_types (node->decl);
11591 adjustments.create (args.length ());
11592 unsigned i, j, veclen;
11593 struct ipa_parm_adjustment adj;
11594 for (i = 0; i < node->simdclone->nargs; ++i)
11596 memset (&adj, 0, sizeof (adj));
11597 tree parm = args[i];
11598 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
11599 adj.base_index = i;
11600 adj.base = parm;
11602 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
11603 node->simdclone->args[i].orig_type = parm_type;
11605 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
11607 /* No adjustment necessary for scalar arguments. */
11608 adj.op = IPA_PARM_OP_COPY;
11610 else
11612 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
11613 veclen = node->simdclone->vecsize_int;
11614 else
11615 veclen = node->simdclone->vecsize_float;
11616 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
11617 if (veclen > node->simdclone->simdlen)
11618 veclen = node->simdclone->simdlen;
11619 adj.arg_prefix = "simd";
11620 adj.type = build_vector_type (parm_type, veclen);
11621 node->simdclone->args[i].vector_type = adj.type;
11622 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11624 adjustments.safe_push (adj);
11625 if (j == veclen)
11627 memset (&adj, 0, sizeof (adj));
11628 adj.op = IPA_PARM_OP_NEW;
11629 adj.arg_prefix = "simd";
11630 adj.base_index = i;
11631 adj.type = node->simdclone->args[i].vector_type;
11635 if (node->definition)
11636 node->simdclone->args[i].simd_array
11637 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
11638 parm_type, node->simdclone->simdlen);
11640 adjustments.safe_push (adj);
11643 if (node->simdclone->inbranch)
11645 tree base_type
11646 = simd_clone_compute_base_data_type (node->simdclone->origin,
11647 node->simdclone);
11649 memset (&adj, 0, sizeof (adj));
11650 adj.op = IPA_PARM_OP_NEW;
11651 adj.arg_prefix = "mask";
11653 adj.base_index = i;
11654 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
11655 veclen = node->simdclone->vecsize_int;
11656 else
11657 veclen = node->simdclone->vecsize_float;
11658 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
11659 if (veclen > node->simdclone->simdlen)
11660 veclen = node->simdclone->simdlen;
11661 adj.type = build_vector_type (base_type, veclen);
11662 adjustments.safe_push (adj);
11664 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11665 adjustments.safe_push (adj);
11667 /* We have previously allocated one extra entry for the mask. Use
11668 it and fill it. */
11669 struct cgraph_simd_clone *sc = node->simdclone;
11670 sc->nargs++;
11671 if (node->definition)
11673 sc->args[i].orig_arg
11674 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
11675 sc->args[i].simd_array
11676 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
11678 sc->args[i].orig_type = base_type;
11679 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
11682 if (node->definition)
11683 ipa_modify_formal_parameters (node->decl, adjustments);
11684 else
11686 tree new_arg_types = NULL_TREE, new_reversed;
11687 bool last_parm_void = false;
11688 if (args.length () > 0 && args.last () == void_type_node)
11689 last_parm_void = true;
11691 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
11692 j = adjustments.length ();
11693 for (i = 0; i < j; i++)
11695 struct ipa_parm_adjustment *adj = &adjustments[i];
11696 tree ptype;
11697 if (adj->op == IPA_PARM_OP_COPY)
11698 ptype = args[adj->base_index];
11699 else
11700 ptype = adj->type;
11701 new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
11703 new_reversed = nreverse (new_arg_types);
11704 if (last_parm_void)
11706 if (new_reversed)
11707 TREE_CHAIN (new_arg_types) = void_list_node;
11708 else
11709 new_reversed = void_list_node;
11712 tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
11713 TYPE_ARG_TYPES (new_type) = new_reversed;
11714 TREE_TYPE (node->decl) = new_type;
11716 adjustments.release ();
11718 args.release ();
11719 return adjustments;
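/* Continuing the example above (simdlen 8, veclen 4): a vector
   argument "int x" is rewritten into two V4SI parameters, one
   adjustment entry per veclen-sized chunk, while uniform and linear
   arguments keep their scalar type via IPA_PARM_OP_COPY; for an
   inbranch clone a mask parameter (again one per veclen-sized chunk)
   is appended, using the extra slot reserved by
   simd_clone_clauses_extract.  */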
11722 /* Initialize and copy the function arguments in NODE to their
11723 corresponding local simd arrays. Returns a fresh gimple_seq with
11724 the instruction sequence generated. */
11726 static gimple_seq
11727 simd_clone_init_simd_arrays (struct cgraph_node *node,
11728 ipa_parm_adjustment_vec adjustments)
11730 gimple_seq seq = NULL;
11731 unsigned i = 0, j = 0, k;
11733 for (tree arg = DECL_ARGUMENTS (node->decl);
11734 arg;
11735 arg = DECL_CHAIN (arg), i++, j++)
11737 if (adjustments[j].op == IPA_PARM_OP_COPY)
11738 continue;
11740 node->simdclone->args[i].vector_arg = arg;
11742 tree array = node->simdclone->args[i].simd_array;
11743 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
11745 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11746 tree ptr = build_fold_addr_expr (array);
11747 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11748 build_int_cst (ptype, 0));
11749 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11750 gimplify_and_add (t, &seq);
11752 else
11754 unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
11755 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11756 for (k = 0; k < node->simdclone->simdlen; k += simdlen)
11758 tree ptr = build_fold_addr_expr (array);
11759 int elemsize;
11760 if (k)
11762 arg = DECL_CHAIN (arg);
11763 j++;
11765 elemsize
11766 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
11767 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11768 build_int_cst (ptype, k * elemsize));
11769 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11770 gimplify_and_add (t, &seq);
11774 return seq;
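/* A sketch of the emitted sequence (names invented): with simdlen 8
   and two incoming V4SI chunks x and x.1 of an "int x" argument, the
   code above builds the equivalent of

     MEM[(int *)&x_simd_array] = x;
     MEM[(int *)&x_simd_array + 16B] = x.1;

   so that the rewritten body can address individual lanes through the
   local array.  */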
11777 /* Callback info for ipa_simd_modify_stmt_ops below. */
11779 struct modify_stmt_info {
11780 ipa_parm_adjustment_vec adjustments;
11781 gimple stmt;
11782 /* True if the parent statement was modified by
11783 ipa_simd_modify_stmt_ops. */
11784 bool modified;
11787 /* Callback for walk_gimple_op.
11789 Adjust operands from a given statement as specified in the
11790 adjustments vector in the callback data. */
11792 static tree
11793 ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
11795 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
11796 struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
11797 tree *orig_tp = tp;
11798 if (TREE_CODE (*tp) == ADDR_EXPR)
11799 tp = &TREE_OPERAND (*tp, 0);
11800 struct ipa_parm_adjustment *cand = NULL;
11801 if (TREE_CODE (*tp) == PARM_DECL)
11802 cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
11803 else
11805 if (TYPE_P (*tp))
11806 *walk_subtrees = 0;
11809 tree repl = NULL_TREE;
11810 if (cand)
11811 repl = unshare_expr (cand->new_decl);
11812 else
11814 if (tp != orig_tp)
11816 *walk_subtrees = 0;
11817 bool modified = info->modified;
11818 info->modified = false;
11819 walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
11820 if (!info->modified)
11822 info->modified = modified;
11823 return NULL_TREE;
11825 info->modified = modified;
11826 repl = *tp;
11828 else
11829 return NULL_TREE;
11832 if (tp != orig_tp)
11834 repl = build_fold_addr_expr (repl);
11835 gimple stmt;
11836 if (is_gimple_debug (info->stmt))
11838 tree vexpr = make_node (DEBUG_EXPR_DECL);
11839 stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
11840 DECL_ARTIFICIAL (vexpr) = 1;
11841 TREE_TYPE (vexpr) = TREE_TYPE (repl);
11842 DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
11843 repl = vexpr;
11845 else
11847 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl),
11848 NULL), repl);
11849 repl = gimple_assign_lhs (stmt);
11851 gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
11852 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11853 *orig_tp = repl;
11855 else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
11857 tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
11858 *tp = vce;
11860 else
11861 *tp = repl;
11863 info->modified = true;
11864 return NULL_TREE;
11867 /* Traverse the function body and perform all modifications as
11868 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
11869 modified such that the replacement/reduction value will now be an
11870 offset into the corresponding simd_array.
11872 This function will replace all function argument uses with their
11873 corresponding simd array elements, and adjust the return values
11874 accordingly. */
11876 static void
11877 ipa_simd_modify_function_body (struct cgraph_node *node,
11878 ipa_parm_adjustment_vec adjustments,
11879 tree retval_array, tree iter)
11881 basic_block bb;
11882 unsigned int i, j, l;
11884 /* Re-use the adjustments array, but this time use it to replace
11885 every function argument use with an offset into the corresponding
11886 simd_array. */
11887 for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
11889 if (!node->simdclone->args[i].vector_arg)
11890 continue;
11892 tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
11893 tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
11894 adjustments[j].new_decl
11895 = build4 (ARRAY_REF,
11896 basetype,
11897 node->simdclone->args[i].simd_array,
11898 iter,
11899 NULL_TREE, NULL_TREE);
11900 if (adjustments[j].op == IPA_PARM_OP_NONE
11901 && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
11902 j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
11905 l = adjustments.length ();
11906 for (i = 1; i < num_ssa_names; i++)
11908 tree name = ssa_name (i);
11909 if (name
11910 && SSA_NAME_VAR (name)
11911 && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
11913 for (j = 0; j < l; j++)
11914 if (SSA_NAME_VAR (name) == adjustments[j].base
11915 && adjustments[j].new_decl)
11917 tree base_var;
11918 if (adjustments[j].new_ssa_base == NULL_TREE)
11920 base_var
11921 = copy_var_decl (adjustments[j].base,
11922 DECL_NAME (adjustments[j].base),
11923 TREE_TYPE (adjustments[j].base));
11924 adjustments[j].new_ssa_base = base_var;
11926 else
11927 base_var = adjustments[j].new_ssa_base;
11928 if (SSA_NAME_IS_DEFAULT_DEF (name))
11930 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11931 gimple_stmt_iterator gsi = gsi_after_labels (bb);
11932 tree new_decl = unshare_expr (adjustments[j].new_decl);
11933 set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
11934 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
11935 SSA_NAME_IS_DEFAULT_DEF (name) = 0;
11936 gimple stmt = gimple_build_assign (name, new_decl);
11937 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11939 else
11940 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
11945 struct modify_stmt_info info;
11946 info.adjustments = adjustments;
11948 FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
11950 gimple_stmt_iterator gsi;
11952 gsi = gsi_start_bb (bb);
11953 while (!gsi_end_p (gsi))
11955 gimple stmt = gsi_stmt (gsi);
11956 info.stmt = stmt;
11957 struct walk_stmt_info wi;
11959 memset (&wi, 0, sizeof (wi));
11960 info.modified = false;
11961 wi.info = &info;
11962 walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
11964 if (gimple_return return_stmt = dyn_cast <gimple_return> (stmt))
11966 tree retval = gimple_return_retval (return_stmt);
11967 if (!retval)
11969 gsi_remove (&gsi, true);
11970 continue;
11973 /* Replace `return foo' with `retval_array[iter] = foo'. */
11974 tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
11975 retval_array, iter, NULL, NULL);
11976 stmt = gimple_build_assign (ref, retval);
11977 gsi_replace (&gsi, stmt, true);
11978 info.modified = true;
11981 if (info.modified)
11983 update_stmt (stmt);
11984 if (maybe_clean_eh_stmt (stmt))
11985 gimple_purge_dead_eh_edges (gimple_bb (stmt));
11987 gsi_next (&gsi);
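/* A sketch of the rewrite performed here (ITER is the loop counter
   created by simd_clone_adjust below): every use of a vector parameter
   X becomes a read of x_simd_array[iter], and

     return foo;

   becomes

     retval_array[iter] = foo;

   with the single real return emitted later, after the loop.  */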
11992 /* Adjust the argument types in NODE to their appropriate vector
11993 counterparts. */
11995 static void
11996 simd_clone_adjust (struct cgraph_node *node)
11998 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
12000 targetm.simd_clone.adjust (node);
12002 tree retval = simd_clone_adjust_return_type (node);
12003 ipa_parm_adjustment_vec adjustments
12004 = simd_clone_adjust_argument_types (node);
12006 push_gimplify_context ();
12008 gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
12010 /* Adjust all uses of vector arguments accordingly. Adjust all
12011 return values accordingly. */
12012 tree iter = create_tmp_var (unsigned_type_node, "iter");
12013 tree iter1 = make_ssa_name (iter, NULL);
12014 tree iter2 = make_ssa_name (iter, NULL);
12015 ipa_simd_modify_function_body (node, adjustments, retval, iter1);
12017 /* Initialize the iteration variable. */
12018 basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12019 basic_block body_bb = split_block_after_labels (entry_bb)->dest;
12020 gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
12021 /* Insert the SIMD array and iv initialization at function
12022 entry. */
12023 gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
12025 pop_gimplify_context (NULL);
12027 /* Create a new BB right before the original exit BB, to hold the
12028 iteration increment and the condition/branch. */
12029 basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
12030 basic_block incr_bb = create_empty_bb (orig_exit);
12031 add_bb_to_loop (incr_bb, body_bb->loop_father);
12032 /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with empty
12033 edge flags. Set it now to be a FALLTHRU_EDGE. */
12034 gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
12035 EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
12036 for (unsigned i = 0;
12037 i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
12039 edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
12040 redirect_edge_succ (e, incr_bb);
12042 edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
12043 e->probability = REG_BR_PROB_BASE;
12044 gsi = gsi_last_bb (incr_bb);
12045 gimple g = gimple_build_assign_with_ops (PLUS_EXPR, iter2, iter1,
12046 build_int_cst (unsigned_type_node,
12047 1));
12048 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12050 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
12051 struct loop *loop = alloc_loop ();
12052 cfun->has_force_vectorize_loops = true;
12053 loop->safelen = node->simdclone->simdlen;
12054 loop->force_vectorize = true;
12055 loop->header = body_bb;
12057 /* Branch around the body if the mask applies. */
12058 if (node->simdclone->inbranch)
12060 gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
12061 tree mask_array
12062 = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
12063 tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)), NULL);
12064 tree aref = build4 (ARRAY_REF,
12065 TREE_TYPE (TREE_TYPE (mask_array)),
12066 mask_array, iter1,
12067 NULL, NULL);
12068 g = gimple_build_assign (mask, aref);
12069 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12070 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
12071 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
12073 aref = build1 (VIEW_CONVERT_EXPR,
12074 build_nonstandard_integer_type (bitsize, 0), mask);
12075 mask = make_ssa_name (TREE_TYPE (aref), NULL);
12076 g = gimple_build_assign (mask, aref);
12077 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12080 g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
12081 NULL, NULL);
12082 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12083 make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
12084 FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
12087 /* Generate the condition. */
12088 g = gimple_build_cond (LT_EXPR,
12089 iter2,
12090 build_int_cst (unsigned_type_node,
12091 node->simdclone->simdlen),
12092 NULL, NULL);
12093 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12094 e = split_block (incr_bb, gsi_stmt (gsi));
12095 basic_block latch_bb = e->dest;
12096 basic_block new_exit_bb;
12097 new_exit_bb = split_block (latch_bb, NULL)->dest;
12098 loop->latch = latch_bb;
12100 redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
12102 make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
12103 /* The successor of incr_bb is already pointing to latch_bb; just
12104 change the flags.
12105 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
12106 FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
12108 gimple_phi phi = create_phi_node (iter1, body_bb);
12109 edge preheader_edge = find_edge (entry_bb, body_bb);
12110 edge latch_edge = single_succ_edge (latch_bb);
12111 add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
12112 UNKNOWN_LOCATION);
12113 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12115 /* Generate the new return. */
12116 gsi = gsi_last_bb (new_exit_bb);
12117 if (retval
12118 && TREE_CODE (retval) == VIEW_CONVERT_EXPR
12119 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
12120 retval = TREE_OPERAND (retval, 0);
12121 else if (retval)
12123 retval = build1 (VIEW_CONVERT_EXPR,
12124 TREE_TYPE (TREE_TYPE (node->decl)),
12125 retval);
12126 retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
12127 false, GSI_CONTINUE_LINKING);
12129 g = gimple_build_return (retval);
12130 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12132 /* Handle aligned clauses by replacing default defs of the aligned
12133 uniform args with the lhs of a __builtin_assume_aligned (arg_N(D),
12134 alignment) call. Handle linear clauses by adding PHIs. */
12135 for (unsigned i = 0; i < node->simdclone->nargs; i++)
12136 if (node->simdclone->args[i].alignment
12137 && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
12138 && (node->simdclone->args[i].alignment
12139 & (node->simdclone->args[i].alignment - 1)) == 0
12140 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
12141 == POINTER_TYPE)
12143 unsigned int alignment = node->simdclone->args[i].alignment;
12144 tree orig_arg = node->simdclone->args[i].orig_arg;
12145 tree def = ssa_default_def (cfun, orig_arg);
12146 if (def && !has_zero_uses (def))
12148 tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
12149 gimple_seq seq = NULL;
12150 bool need_cvt = false;
12151 gimple_call call
12152 = gimple_build_call (fn, 2, def, size_int (alignment));
12153 g = call;
12154 if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
12155 ptr_type_node))
12156 need_cvt = true;
12157 tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg, NULL);
12158 gimple_call_set_lhs (g, t);
12159 gimple_seq_add_stmt_without_update (&seq, g);
12160 if (need_cvt)
12162 t = make_ssa_name (orig_arg, NULL);
12163 g = gimple_build_assign_with_ops (NOP_EXPR, t,
12164 gimple_call_lhs (g),
12165 NULL_TREE);
12166 gimple_seq_add_stmt_without_update (&seq, g);
12168 gsi_insert_seq_on_edge_immediate
12169 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
12171 entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12172 int freq = compute_call_stmt_bb_frequency (current_function_decl,
12173 entry_bb);
12174 node->create_edge (cgraph_node::get_create (fn),
12175 call, entry_bb->count, freq);
12177 imm_use_iterator iter;
12178 use_operand_p use_p;
12179 gimple use_stmt;
12180 tree repl = gimple_get_lhs (g);
12181 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12182 if (is_gimple_debug (use_stmt) || use_stmt == call)
12183 continue;
12184 else
12185 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12186 SET_USE (use_p, repl);
12189 else if (node->simdclone->args[i].arg_type
12190 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
12192 tree orig_arg = node->simdclone->args[i].orig_arg;
12193 tree def = ssa_default_def (cfun, orig_arg);
12194 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12195 || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
12196 if (def && !has_zero_uses (def))
12198 iter1 = make_ssa_name (orig_arg, NULL);
12199 iter2 = make_ssa_name (orig_arg, NULL);
12200 phi = create_phi_node (iter1, body_bb);
12201 add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
12202 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12203 enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12204 ? PLUS_EXPR : POINTER_PLUS_EXPR;
12205 tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12206 ? TREE_TYPE (orig_arg) : sizetype;
12207 tree addcst
12208 = build_int_cst (addtype, node->simdclone->args[i].linear_step);
12209 g = gimple_build_assign_with_ops (code, iter2, iter1, addcst);
12210 gsi = gsi_last_bb (incr_bb);
12211 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
12213 imm_use_iterator iter;
12214 use_operand_p use_p;
12215 gimple use_stmt;
12216 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12217 if (use_stmt == phi)
12218 continue;
12219 else
12220 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12221 SET_USE (use_p, iter1);
12225 calculate_dominance_info (CDI_DOMINATORS);
12226 add_loop (loop, loop->header->loop_father);
12227 update_ssa (TODO_update_ssa);
12229 pop_cfun ();
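/* Overall, the clone body produced above has roughly this shape
   (pseudo-GIMPLE sketch, ignoring the optional inbranch mask test):

     entry_bb:
       <copy vector args into their simd arrays>
     body_bb:
       iter1 = PHI <0 (entry_bb), iter2 (latch_bb)>
       <original body, reading/writing the arrays at [iter1]>
     incr_bb:
       iter2 = iter1 + 1;
       if (iter2 < simdlen) goto latch_bb; else goto new_exit_bb;
     latch_bb:
       goto body_bb;
     new_exit_bb:
       return <vector or simd-array result>;

   The loop is marked force_vectorize with safelen == simdlen, so the
   vectorizer can collapse it back into straight-line vector code.  */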
12232 /* If the function in NODE is tagged as an elemental SIMD function,
12233 create the appropriate SIMD clones. */
12235 static void
12236 expand_simd_clones (struct cgraph_node *node)
12238 tree attr = lookup_attribute ("omp declare simd",
12239 DECL_ATTRIBUTES (node->decl));
12240 if (attr == NULL_TREE
12241 || node->global.inlined_to
12242 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
12243 return;
12245 /* Ignore
12246 #pragma omp declare simd
12247 extern int foo ();
12248 in C, where we don't know the argument types at all. */
12249 if (!node->definition
12250 && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
12251 return;
12255 /* Start with parsing the "omp declare simd" attribute(s). */
12256 bool inbranch_clause_specified;
12257 struct cgraph_simd_clone *clone_info
12258 = simd_clone_clauses_extract (node, TREE_VALUE (attr),
12259 &inbranch_clause_specified);
12260 if (clone_info == NULL)
12261 continue;
12263 int orig_simdlen = clone_info->simdlen;
12264 tree base_type = simd_clone_compute_base_data_type (node, clone_info);
12265 /* The target can return 0 (no simd clones should be created),
12266 1 (just one ISA of simd clones should be created) or a higher
12267 count of ISA variants. In the latter case, clone_info is initialized
12268 for the first ISA variant. */
12269 int count
12270 = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
12271 base_type, 0);
12272 if (count == 0)
12273 continue;
12275 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
12276 also create one inbranch and one !inbranch clone of it. */
12277 for (int i = 0; i < count * 2; i++)
12279 struct cgraph_simd_clone *clone = clone_info;
12280 if (inbranch_clause_specified && (i & 1) != 0)
12281 continue;
12283 if (i != 0)
12285 clone = simd_clone_struct_alloc (clone_info->nargs
12286 + ((i & 1) != 0));
12287 simd_clone_struct_copy (clone, clone_info);
12288 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
12289 and simd_clone_adjust_argument_types did to the first
12290 clone's info. */
12291 clone->nargs -= clone_info->inbranch;
12292 clone->simdlen = orig_simdlen;
12293 /* And call the target hook again to get the right ISA. */
12294 targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
12295 base_type,
12296 i / 2);
12297 if ((i & 1) != 0)
12298 clone->inbranch = 1;
12301 /* simd_clone_mangle might fail if such a clone has been created
12302 already. */
12303 tree id = simd_clone_mangle (node, clone);
12304 if (id == NULL_TREE)
12305 continue;
12307 /* Only once we are sure we want to create the clone do we actually
12308 clone the function (for definitions) or create another
12309 extern FUNCTION_DECL (for prototypes without definitions). */
12310 struct cgraph_node *n = simd_clone_create (node);
12311 if (n == NULL)
12312 continue;
12314 n->simdclone = clone;
12315 clone->origin = node;
12316 clone->next_clone = NULL;
12317 if (node->simd_clones == NULL)
12319 clone->prev_clone = n;
12320 node->simd_clones = n;
12322 else
12324 clone->prev_clone = node->simd_clones->simdclone->prev_clone;
12325 clone->prev_clone->simdclone->next_clone = n;
12326 node->simd_clones->simdclone->prev_clone = n;
12328 symtab->change_decl_assembler_name (n->decl, id);
12329 /* And finally adjust the return type and parameters, and for
12330 definitions also the function body. */
12331 if (node->definition)
12332 simd_clone_adjust (n);
12333 else
12335 simd_clone_adjust_return_type (n);
12336 simd_clone_adjust_argument_types (n);
12340 while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
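/* E.g. (illustration): one "omp declare simd" attribute without an
   inbranch/notinbranch clause, on a target whose hook reports
   count == 2, yields up to four clones: an inbranch and a notinbranch
   variant for each of the two ISAs, fewer if simd_clone_mangle
   detects a duplicate name.  */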
12343 /* Entry point for IPA simd clone creation pass. */
12345 static unsigned int
12346 ipa_omp_simd_clone (void)
12348 struct cgraph_node *node;
12349 FOR_EACH_FUNCTION (node)
12350 expand_simd_clones (node);
12351 return 0;
12354 namespace {
12356 const pass_data pass_data_omp_simd_clone =
12358 SIMPLE_IPA_PASS, /* type */
12359 "simdclone", /* name */
12360 OPTGROUP_NONE, /* optinfo_flags */
12361 TV_NONE, /* tv_id */
12362 ( PROP_ssa | PROP_cfg ), /* properties_required */
12363 0, /* properties_provided */
12364 0, /* properties_destroyed */
12365 0, /* todo_flags_start */
12366 0, /* todo_flags_finish */
12369 class pass_omp_simd_clone : public simple_ipa_opt_pass
12371 public:
12372 pass_omp_simd_clone(gcc::context *ctxt)
12373 : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
12376 /* opt_pass methods: */
12377 virtual bool gate (function *);
12378 virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
12381 bool
12382 pass_omp_simd_clone::gate (function *)
12384 return ((flag_openmp || flag_openmp_simd
12385 || flag_cilkplus
12386 || (in_lto_p && !flag_wpa))
12387 && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
12390 } // anon namespace
12392 simple_ipa_opt_pass *
12393 make_pass_omp_simd_clone (gcc::context *ctxt)
12395 return new pass_omp_simd_clone (ctxt);
12398 #include "gt-omp-low.h"