/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "rtl.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
#include "cilk.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
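
/* For illustration only (the decl and variable names below are made up,
   and the exact runtime call depends on the clauses involved), a minimal
   input such as

	#pragma omp parallel shared (x)
	  x++;

   is conceptually outlined into a child function plus a libgomp call,
   roughly:

	void main._omp_fn.0 (struct .omp_data_s *.omp_data_i)
	{
	  *(.omp_data_i->x) += 1;
	}

	.omp_data_o.x = &x;
	__builtin_GOMP_parallel (main._omp_fn.0, &.omp_data_o, 0, 0);  */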

/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
     barriers should jump to during omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;
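
/* A sketch of the "inheritance" noted above: because CB is the first
   member, a callback handed the embedded copy_body_data can recover
   the enclosing context with a plain cast,

	omp_context *ctx = (omp_context *) cb;

   exactly as omp_copy_decl below does.  */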

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};

static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD
		      || (gimple_omp_for_kind (for_stmt)
			  == GF_OMP_FOR_KIND_CILKFOR));
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
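
/* A worked instance of the constant trip count folding above: with
   n1 = 0, n2 = 10, step = 3 and cond_code LT_EXPR, the folded value is
   (step + -1 + n2 - n1) / step = (3 - 1 + 10 - 0) / 3 = 4, which is
   indeed the iteration count of "for (i = 0; i < 10; i += 3)".  */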

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (ws_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (ws_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
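
/* For illustration (argument values are made up), for a combined
   "parallel for schedule (dynamic, 4)" the three or four values
   collected above become the trailing arguments of the combined
   libgomp entry point, roughly:

	GOMP_parallel_loop_dynamic (fn, data, num_threads,
				    n1, n2, step, 4, flags);

   while a GIMPLE_OMP_SECTIONS region passes only the section count
   to GOMP_parallel_sections.  */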

/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
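
/* For illustration: given "int x;" shared on a parallel, a
   non-addressable X can use copy-in/copy-out through a value field
   "x" in .omp_data_s, whereas taking "&x" anywhere in the function
   makes X TREE_ADDRESSABLE, so the field becomes a pointer and the
   child accesses *(.omp_data_i->x) instead.  */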

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
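
/* A note on MASK, inferred from the uses in this file: bit 1 requests
   a field in RECORD_TYPE / FIELD_MAP, bit 2 one in SRECORD_TYPE /
   SFIELD_MAP (the task sender record), so mask 3 means both; bit 4
   additionally wraps an ARRAY_TYPE in a double pointer, as used for
   zero-bias array section map clauses (mask 7).  */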

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
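
/* An illustrative dump (basic block numbers made up) for a parallel
   region containing a for region might look roughly like:

	bb 2: gimple_omp_parallel
	    bb 3: gimple_omp_for
	    bb 5: GIMPLE_OMP_CONTINUE
	    bb 6: GIMPLE_OMP_RETURN
	bb 7: GIMPLE_OMP_RETURN

   with indentation growing by four columns per nesting level.  */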

/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node::add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  if (ctx->outer)
		    {
		      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		      decl = OMP_CLAUSE_DECL (c);
		    }
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
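
/* For illustration: for "#pragma omp parallel shared (a) firstprivate (b)"
   where A is addressable and B a plain scalar, the first pass above
   typically installs a pointer field for A (mask 3, by reference) and a
   value field for B (mask 3), plus local replacement decls for both in
   the child function; the second pass then fixes up the remapped
   decls' types and value-exprs.  */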

/* Create a new name for omp child function.  Returns an identifier.  If
   IS_CILK_FOR is true then the suffix for the child function is
   "_cilk_for_fn".  */

static tree
create_omp_child_function_name (bool task_copy, bool is_cilk_for)
{
  if (is_cilk_for)
    return clone_function_name (current_function_decl, "_cilk_for_fn");
  return clone_function_name (current_function_decl,
			      task_copy ? "_omp_cpyfn" : "_omp_fn");
}
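
/* E.g. within foo (), the first outlined parallel body is typically
   named "foo._omp_fn.0" and a task copy function "foo._omp_cpyfn.1";
   the trailing number comes from clone_function_name's counter.  */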

/* Returns the type of the induction variable for the child function for
   _Cilk_for and the types for _high and _low variables based on TYPE.  */

static tree
cilk_for_check_loop_diff_type (tree type)
{
  if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
    {
      if (TYPE_UNSIGNED (type))
	return uint32_type_node;
      else
	return integer_type_node;
    }
  else
    {
      if (TYPE_UNSIGNED (type))
	return uint64_type_node;
      else
	return long_long_integer_type_node;
    }
}
1880 /* Build a decl for the omp child function. It'll not contain a body
1881 yet, just the bare decl. */
1883 static void
1884 create_omp_child_function (omp_context *ctx, bool task_copy)
1886 tree decl, type, name, t;
1888 tree cilk_for_count
1889 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
1890 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1891 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
1892 tree cilk_var_type = NULL_TREE;
1894 name = create_omp_child_function_name (task_copy,
1895 cilk_for_count != NULL_TREE);
1896 if (task_copy)
1897 type = build_function_type_list (void_type_node, ptr_type_node,
1898 ptr_type_node, NULL_TREE);
1899 else if (cilk_for_count)
1901 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
1902 cilk_var_type = cilk_for_check_loop_diff_type (type);
1903 type = build_function_type_list (void_type_node, ptr_type_node,
1904 cilk_var_type, cilk_var_type, NULL_TREE);
1906 else
1907 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1909 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
1911 if (!task_copy)
1912 ctx->cb.dst_fn = decl;
1913 else
1914 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1916 TREE_STATIC (decl) = 1;
1917 TREE_USED (decl) = 1;
1918 DECL_ARTIFICIAL (decl) = 1;
1919 DECL_IGNORED_P (decl) = 0;
1920 TREE_PUBLIC (decl) = 0;
1921 DECL_UNINLINABLE (decl) = 1;
1922 DECL_EXTERNAL (decl) = 0;
1923 DECL_CONTEXT (decl) = NULL_TREE;
1924 DECL_INITIAL (decl) = make_node (BLOCK);
1925 bool target_p = false;
1926 if (lookup_attribute ("omp declare target",
1927 DECL_ATTRIBUTES (current_function_decl)))
1928 target_p = true;
1929 else
1931 omp_context *octx;
1932 for (octx = ctx; octx; octx = octx->outer)
1933 if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
1934 && gimple_omp_target_kind (octx->stmt)
1935 == GF_OMP_TARGET_KIND_REGION)
1937 target_p = true;
1938 break;
1941 if (target_p)
1942 DECL_ATTRIBUTES (decl)
1943 = tree_cons (get_identifier ("omp declare target"),
1944 NULL_TREE, DECL_ATTRIBUTES (decl));
1946 t = build_decl (DECL_SOURCE_LOCATION (decl),
1947 RESULT_DECL, NULL_TREE, void_type_node);
1948 DECL_ARTIFICIAL (t) = 1;
1949 DECL_IGNORED_P (t) = 1;
1950 DECL_CONTEXT (t) = decl;
1951 DECL_RESULT (decl) = t;
1953 /* _Cilk_for's child function requires two extra parameters called
1954 __low and __high that are set by the Cilk runtime when it calls this
1955 function. */
1956 if (cilk_for_count)
1958 t = build_decl (DECL_SOURCE_LOCATION (decl),
1959 PARM_DECL, get_identifier ("__high"), cilk_var_type);
1960 DECL_ARTIFICIAL (t) = 1;
1961 DECL_NAMELESS (t) = 1;
1962 DECL_ARG_TYPE (t) = ptr_type_node;
1963 DECL_CONTEXT (t) = current_function_decl;
1964 TREE_USED (t) = 1;
1965 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1966 DECL_ARGUMENTS (decl) = t;
1968 t = build_decl (DECL_SOURCE_LOCATION (decl),
1969 PARM_DECL, get_identifier ("__low"), cilk_var_type);
1970 DECL_ARTIFICIAL (t) = 1;
1971 DECL_NAMELESS (t) = 1;
1972 DECL_ARG_TYPE (t) = ptr_type_node;
1973 DECL_CONTEXT (t) = current_function_decl;
1974 TREE_USED (t) = 1;
1975 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1976 DECL_ARGUMENTS (decl) = t;
1979 tree data_name = get_identifier (".omp_data_i");
1980 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
1981 ptr_type_node);
1982 DECL_ARTIFICIAL (t) = 1;
1983 DECL_NAMELESS (t) = 1;
1984 DECL_ARG_TYPE (t) = ptr_type_node;
1985 DECL_CONTEXT (t) = current_function_decl;
1986 TREE_USED (t) = 1;
1987 if (cilk_for_count)
1988 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1989 DECL_ARGUMENTS (decl) = t;
1990 if (!task_copy)
1991 ctx->receiver_decl = t;
1992 else
1994 t = build_decl (DECL_SOURCE_LOCATION (decl),
1995 PARM_DECL, get_identifier (".omp_data_o"),
1996 ptr_type_node);
1997 DECL_ARTIFICIAL (t) = 1;
1998 DECL_NAMELESS (t) = 1;
1999 DECL_ARG_TYPE (t) = ptr_type_node;
2000 DECL_CONTEXT (t) = current_function_decl;
2001 TREE_USED (t) = 1;
2002 TREE_ADDRESSABLE (t) = 1;
2003 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2004 DECL_ARGUMENTS (decl) = t;
2007 /* Allocate memory for the function structure. The call to
2008 allocate_struct_function clobbers CFUN, so we need to restore
2009 it afterward. */
2010 push_struct_function (decl);
2011 cfun->function_end_locus = gimple_location (ctx->stmt);
2012 pop_cfun ();
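/* As a sketch of the result (names are illustrative, not guaranteed):
   for "#pragma omp parallel" inside main, this builds a file-local
   decl roughly equivalent to

     static void main._omp_fn.0 (void *.omp_data_i);

   and for a task copy function

     static void main._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   The body is filled in later when the region is expanded.  */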
2015 /* Callback for walk_gimple_seq. Check whether a combined parallel
2016 contains an OMP_FOR for which gimple_omp_for_combined_into_p is true. */
2018 static tree
2019 find_combined_for (gimple_stmt_iterator *gsi_p,
2020 bool *handled_ops_p,
2021 struct walk_stmt_info *wi)
2023 gimple stmt = gsi_stmt (*gsi_p);
2025 *handled_ops_p = true;
2026 switch (gimple_code (stmt))
2028 WALK_SUBSTMTS;
2030 case GIMPLE_OMP_FOR:
2031 if (gimple_omp_for_combined_into_p (stmt)
2032 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2034 wi->info = stmt;
2035 return integer_zero_node;
2037 break;
2038 default:
2039 break;
2041 return NULL;
2044 /* Scan an OpenMP parallel directive. */
2046 static void
2047 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2049 omp_context *ctx;
2050 tree name;
2051 gimple stmt = gsi_stmt (*gsi);
2053 /* Ignore parallel directives with empty bodies, unless there
2054 are copyin clauses. */
2055 if (optimize > 0
2056 && empty_body_p (gimple_omp_body (stmt))
2057 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2058 OMP_CLAUSE_COPYIN) == NULL)
2060 gsi_replace (gsi, gimple_build_nop (), false);
2061 return;
2064 if (gimple_omp_parallel_combined_p (stmt))
2066 gimple for_stmt;
2067 struct walk_stmt_info wi;
2069 memset (&wi, 0, sizeof (wi));
2070 wi.val_only = true;
2071 walk_gimple_seq (gimple_omp_body (stmt),
2072 find_combined_for, NULL, &wi);
2073 for_stmt = (gimple) wi.info;
2074 if (for_stmt)
2076 struct omp_for_data fd;
2077 extract_omp_for_data (for_stmt, &fd, NULL);
2078 /* We need two temporaries with fd.loop.v type (istart/iend)
2079 and then (fd.collapse - 1) temporaries with the same
2080 type for the count2 ... countN-1 vars if they are not constant. */
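/* E.g. for "#pragma omp parallel for collapse(2)" with a non-constant
   bound, count would be 2 + (2 - 1) = 3, so three _looptemp_ clauses
   get prepended to the parallel (an illustrative sketch).  */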
2081 size_t count = 2, i;
2082 tree type = fd.iter_type;
2083 if (fd.collapse > 1
2084 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2085 count += fd.collapse - 1;
2086 for (i = 0; i < count; i++)
2088 tree temp = create_tmp_var (type, NULL);
2089 tree c = build_omp_clause (UNKNOWN_LOCATION,
2090 OMP_CLAUSE__LOOPTEMP_);
2091 insert_decl_map (&outer_ctx->cb, temp, temp);
2092 OMP_CLAUSE_DECL (c) = temp;
2093 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2094 gimple_omp_parallel_set_clauses (stmt, c);
2099 ctx = new_omp_context (stmt, outer_ctx);
2100 if (taskreg_nesting_level > 1)
2101 ctx->is_nested = true;
2102 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2103 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2104 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2105 name = create_tmp_var_name (".omp_data_s");
2106 name = build_decl (gimple_location (stmt),
2107 TYPE_DECL, name, ctx->record_type);
2108 DECL_ARTIFICIAL (name) = 1;
2109 DECL_NAMELESS (name) = 1;
2110 TYPE_NAME (ctx->record_type) = name;
2111 create_omp_child_function (ctx, false);
2112 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2114 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2115 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2117 if (TYPE_FIELDS (ctx->record_type) == NULL)
2118 ctx->record_type = ctx->receiver_decl = NULL;
2119 else
2121 layout_type (ctx->record_type);
2122 fixup_child_record_type (ctx);
2126 /* Scan an OpenMP task directive. */
2128 static void
2129 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2131 omp_context *ctx;
2132 tree name, t;
2133 gimple stmt = gsi_stmt (*gsi);
2134 location_t loc = gimple_location (stmt);
2136 /* Ignore task directives with empty bodies. */
2137 if (optimize > 0
2138 && empty_body_p (gimple_omp_body (stmt)))
2140 gsi_replace (gsi, gimple_build_nop (), false);
2141 return;
2144 ctx = new_omp_context (stmt, outer_ctx);
2145 if (taskreg_nesting_level > 1)
2146 ctx->is_nested = true;
2147 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2148 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2149 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2150 name = create_tmp_var_name (".omp_data_s");
2151 name = build_decl (gimple_location (stmt),
2152 TYPE_DECL, name, ctx->record_type);
2153 DECL_ARTIFICIAL (name) = 1;
2154 DECL_NAMELESS (name) = 1;
2155 TYPE_NAME (ctx->record_type) = name;
2156 create_omp_child_function (ctx, false);
2157 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2159 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2161 if (ctx->srecord_type)
2163 name = create_tmp_var_name (".omp_data_a");
2164 name = build_decl (gimple_location (stmt),
2165 TYPE_DECL, name, ctx->srecord_type);
2166 DECL_ARTIFICIAL (name) = 1;
2167 DECL_NAMELESS (name) = 1;
2168 TYPE_NAME (ctx->srecord_type) = name;
2169 create_omp_child_function (ctx, true);
2172 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2174 if (TYPE_FIELDS (ctx->record_type) == NULL)
2176 ctx->record_type = ctx->receiver_decl = NULL;
2177 t = build_int_cst (long_integer_type_node, 0);
2178 gimple_omp_task_set_arg_size (stmt, t);
2179 t = build_int_cst (long_integer_type_node, 1);
2180 gimple_omp_task_set_arg_align (stmt, t);
2182 else
2184 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2185 /* Move VLA fields to the end. */
2186 p = &TYPE_FIELDS (ctx->record_type);
2187 while (*p)
2188 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2189 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2191 *q = *p;
2192 *p = TREE_CHAIN (*p);
2193 TREE_CHAIN (*q) = NULL_TREE;
2194 q = &TREE_CHAIN (*q);
2196 else
2197 p = &DECL_CHAIN (*p);
2198 *p = vla_fields;
2199 layout_type (ctx->record_type);
2200 fixup_child_record_type (ctx);
2201 if (ctx->srecord_type)
2202 layout_type (ctx->srecord_type);
2203 t = fold_convert_loc (loc, long_integer_type_node,
2204 TYPE_SIZE_UNIT (ctx->record_type));
2205 gimple_omp_task_set_arg_size (stmt, t);
2206 t = build_int_cst (long_integer_type_node,
2207 TYPE_ALIGN_UNIT (ctx->record_type));
2208 gimple_omp_task_set_arg_align (stmt, t);
2213 /* Scan an OpenMP loop directive. */
2215 static void
2216 scan_omp_for (gimple stmt, omp_context *outer_ctx)
2218 omp_context *ctx;
2219 size_t i;
2221 ctx = new_omp_context (stmt, outer_ctx);
2223 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2225 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2226 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2228 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2229 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2230 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2231 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2233 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2236 /* Scan an OpenMP sections directive. */
2238 static void
2239 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
2241 omp_context *ctx;
2243 ctx = new_omp_context (stmt, outer_ctx);
2244 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2245 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2248 /* Scan an OpenMP single directive. */
2250 static void
2251 scan_omp_single (gimple stmt, omp_context *outer_ctx)
2253 omp_context *ctx;
2254 tree name;
2256 ctx = new_omp_context (stmt, outer_ctx);
2257 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2258 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2259 name = create_tmp_var_name (".omp_copy_s");
2260 name = build_decl (gimple_location (stmt),
2261 TYPE_DECL, name, ctx->record_type);
2262 TYPE_NAME (ctx->record_type) = name;
2264 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2265 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2267 if (TYPE_FIELDS (ctx->record_type) == NULL)
2268 ctx->record_type = NULL;
2269 else
2270 layout_type (ctx->record_type);
2273 /* Scan an OpenMP target{, data, update} directive. */
2275 static void
2276 scan_omp_target (gimple stmt, omp_context *outer_ctx)
2278 omp_context *ctx;
2279 tree name;
2280 int kind = gimple_omp_target_kind (stmt);
2282 ctx = new_omp_context (stmt, outer_ctx);
2283 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2284 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2285 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2286 name = create_tmp_var_name (".omp_data_t");
2287 name = build_decl (gimple_location (stmt),
2288 TYPE_DECL, name, ctx->record_type);
2289 DECL_ARTIFICIAL (name) = 1;
2290 DECL_NAMELESS (name) = 1;
2291 TYPE_NAME (ctx->record_type) = name;
2292 if (kind == GF_OMP_TARGET_KIND_REGION)
2294 create_omp_child_function (ctx, false);
2295 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2298 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2299 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2301 if (TYPE_FIELDS (ctx->record_type) == NULL)
2302 ctx->record_type = ctx->receiver_decl = NULL;
2303 else
2305 TYPE_FIELDS (ctx->record_type)
2306 = nreverse (TYPE_FIELDS (ctx->record_type));
2307 #ifdef ENABLE_CHECKING
2308 tree field;
2309 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2310 for (field = TYPE_FIELDS (ctx->record_type);
2311 field;
2312 field = DECL_CHAIN (field))
2313 gcc_assert (DECL_ALIGN (field) == align);
2314 #endif
2315 layout_type (ctx->record_type);
2316 if (kind == GF_OMP_TARGET_KIND_REGION)
2317 fixup_child_record_type (ctx);
2321 /* Scan an OpenMP teams directive. */
2323 static void
2324 scan_omp_teams (gimple stmt, omp_context *outer_ctx)
2326 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2327 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2328 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2331 /* Check OpenMP nesting restrictions. */
2332 static bool
2333 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2335 if (ctx != NULL)
2337 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2338 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2340 error_at (gimple_location (stmt),
2341 "OpenMP constructs may not be nested inside simd region");
2342 return false;
2344 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2346 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2347 || (gimple_omp_for_kind (stmt)
2348 != GF_OMP_FOR_KIND_DISTRIBUTE))
2349 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2351 error_at (gimple_location (stmt),
2352 "only distribute or parallel constructs are allowed to "
2353 "be closely nested inside teams construct");
2354 return false;
2358 switch (gimple_code (stmt))
2360 case GIMPLE_OMP_FOR:
2361 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2362 return true;
2363 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2365 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2367 error_at (gimple_location (stmt),
2368 "distribute construct must be closely nested inside "
2369 "teams construct");
2370 return false;
2372 return true;
2374 /* FALLTHRU */
2375 case GIMPLE_CALL:
2376 if (is_gimple_call (stmt)
2377 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2378 == BUILT_IN_GOMP_CANCEL
2379 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2380 == BUILT_IN_GOMP_CANCELLATION_POINT))
2382 const char *bad = NULL;
2383 const char *kind = NULL;
2384 if (ctx == NULL)
2386 error_at (gimple_location (stmt), "orphaned %qs construct",
2387 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2388 == BUILT_IN_GOMP_CANCEL
2389 ? "#pragma omp cancel"
2390 : "#pragma omp cancellation point");
2391 return false;
2393 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2394 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2395 : 0)
2397 case 1:
2398 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2399 bad = "#pragma omp parallel";
2400 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2401 == BUILT_IN_GOMP_CANCEL
2402 && !integer_zerop (gimple_call_arg (stmt, 1)))
2403 ctx->cancellable = true;
2404 kind = "parallel";
2405 break;
2406 case 2:
2407 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2408 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2409 bad = "#pragma omp for";
2410 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2411 == BUILT_IN_GOMP_CANCEL
2412 && !integer_zerop (gimple_call_arg (stmt, 1)))
2414 ctx->cancellable = true;
2415 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2416 OMP_CLAUSE_NOWAIT))
2417 warning_at (gimple_location (stmt), 0,
2418 "%<#pragma omp cancel for%> inside "
2419 "%<nowait%> for construct");
2420 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2421 OMP_CLAUSE_ORDERED))
2422 warning_at (gimple_location (stmt), 0,
2423 "%<#pragma omp cancel for%> inside "
2424 "%<ordered%> for construct");
2426 kind = "for";
2427 break;
2428 case 4:
2429 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2430 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2431 bad = "#pragma omp sections";
2432 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2433 == BUILT_IN_GOMP_CANCEL
2434 && !integer_zerop (gimple_call_arg (stmt, 1)))
2436 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2438 ctx->cancellable = true;
2439 if (find_omp_clause (gimple_omp_sections_clauses
2440 (ctx->stmt),
2441 OMP_CLAUSE_NOWAIT))
2442 warning_at (gimple_location (stmt), 0,
2443 "%<#pragma omp cancel sections%> inside "
2444 "%<nowait%> sections construct");
2446 else
2448 gcc_assert (ctx->outer
2449 && gimple_code (ctx->outer->stmt)
2450 == GIMPLE_OMP_SECTIONS);
2451 ctx->outer->cancellable = true;
2452 if (find_omp_clause (gimple_omp_sections_clauses
2453 (ctx->outer->stmt),
2454 OMP_CLAUSE_NOWAIT))
2455 warning_at (gimple_location (stmt), 0,
2456 "%<#pragma omp cancel sections%> inside "
2457 "%<nowait%> sections construct");
2460 kind = "sections";
2461 break;
2462 case 8:
2463 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2464 bad = "#pragma omp task";
2465 else
2466 ctx->cancellable = true;
2467 kind = "taskgroup";
2468 break;
2469 default:
2470 error_at (gimple_location (stmt), "invalid arguments");
2471 return false;
2473 if (bad)
2475 error_at (gimple_location (stmt),
2476 "%<%s %s%> construct not closely nested inside of %qs",
2477 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2478 == BUILT_IN_GOMP_CANCEL
2479 ? "#pragma omp cancel"
2480 : "#pragma omp cancellation point", kind, bad);
2481 return false;
2484 /* FALLTHRU */
2485 case GIMPLE_OMP_SECTIONS:
2486 case GIMPLE_OMP_SINGLE:
2487 for (; ctx != NULL; ctx = ctx->outer)
2488 switch (gimple_code (ctx->stmt))
2490 case GIMPLE_OMP_FOR:
2491 case GIMPLE_OMP_SECTIONS:
2492 case GIMPLE_OMP_SINGLE:
2493 case GIMPLE_OMP_ORDERED:
2494 case GIMPLE_OMP_MASTER:
2495 case GIMPLE_OMP_TASK:
2496 case GIMPLE_OMP_CRITICAL:
2497 if (is_gimple_call (stmt))
2499 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2500 != BUILT_IN_GOMP_BARRIER)
2501 return true;
2502 error_at (gimple_location (stmt),
2503 "barrier region may not be closely nested inside "
2504 "of work-sharing, critical, ordered, master or "
2505 "explicit task region");
2506 return false;
2508 error_at (gimple_location (stmt),
2509 "work-sharing region may not be closely nested inside "
2510 "of work-sharing, critical, ordered, master or explicit "
2511 "task region");
2512 return false;
2513 case GIMPLE_OMP_PARALLEL:
2514 return true;
2515 default:
2516 break;
2518 break;
2519 case GIMPLE_OMP_MASTER:
2520 for (; ctx != NULL; ctx = ctx->outer)
2521 switch (gimple_code (ctx->stmt))
2523 case GIMPLE_OMP_FOR:
2524 case GIMPLE_OMP_SECTIONS:
2525 case GIMPLE_OMP_SINGLE:
2526 case GIMPLE_OMP_TASK:
2527 error_at (gimple_location (stmt),
2528 "master region may not be closely nested inside "
2529 "of work-sharing or explicit task region");
2530 return false;
2531 case GIMPLE_OMP_PARALLEL:
2532 return true;
2533 default:
2534 break;
2536 break;
2537 case GIMPLE_OMP_ORDERED:
2538 for (; ctx != NULL; ctx = ctx->outer)
2539 switch (gimple_code (ctx->stmt))
2541 case GIMPLE_OMP_CRITICAL:
2542 case GIMPLE_OMP_TASK:
2543 error_at (gimple_location (stmt),
2544 "ordered region may not be closely nested inside "
2545 "of critical or explicit task region");
2546 return false;
2547 case GIMPLE_OMP_FOR:
2548 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2549 OMP_CLAUSE_ORDERED) == NULL)
2551 error_at (gimple_location (stmt),
2552 "ordered region must be closely nested inside "
2553 "a loop region with an ordered clause");
2554 return false;
2556 return true;
2557 case GIMPLE_OMP_PARALLEL:
2558 error_at (gimple_location (stmt),
2559 "ordered region must be closely nested inside "
2560 "a loop region with an ordered clause");
2561 return false;
2562 default:
2563 break;
2565 break;
2566 case GIMPLE_OMP_CRITICAL:
2567 for (; ctx != NULL; ctx = ctx->outer)
2568 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
2569 && (gimple_omp_critical_name (stmt)
2570 == gimple_omp_critical_name (ctx->stmt)))
2572 error_at (gimple_location (stmt),
2573 "critical region may not be nested inside a critical "
2574 "region with the same name");
2575 return false;
2577 break;
2578 case GIMPLE_OMP_TEAMS:
2579 if (ctx == NULL
2580 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2581 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2583 error_at (gimple_location (stmt),
2584 "teams construct not closely nested inside of target "
2585 "region");
2586 return false;
2588 break;
2589 case GIMPLE_OMP_TARGET:
2590 for (; ctx != NULL; ctx = ctx->outer)
2591 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
2592 && gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_REGION)
2594 const char *name;
2595 switch (gimple_omp_target_kind (stmt))
2597 case GF_OMP_TARGET_KIND_REGION: name = "target"; break;
2598 case GF_OMP_TARGET_KIND_DATA: name = "target data"; break;
2599 case GF_OMP_TARGET_KIND_UPDATE: name = "target update"; break;
2600 default: gcc_unreachable ();
2602 warning_at (gimple_location (stmt), 0,
2603 "%s construct inside of target region", name);
2605 break;
2606 default:
2607 break;
2609 return true;
2613 /* Helper function for scan_omp.
2615 Callback for walk_tree, or for operands in walk_gimple_stmt, used to
2616 scan for OpenMP directives in TP. */
2618 static tree
2619 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2621 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2622 omp_context *ctx = (omp_context *) wi->info;
2623 tree t = *tp;
2625 switch (TREE_CODE (t))
2627 case VAR_DECL:
2628 case PARM_DECL:
2629 case LABEL_DECL:
2630 case RESULT_DECL:
2631 if (ctx)
2632 *tp = remap_decl (t, &ctx->cb);
2633 break;
2635 default:
2636 if (ctx && TYPE_P (t))
2637 *tp = remap_type (t, &ctx->cb);
2638 else if (!DECL_P (t))
2640 *walk_subtrees = 1;
2641 if (ctx)
2643 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2644 if (tem != TREE_TYPE (t))
2646 if (TREE_CODE (t) == INTEGER_CST)
2647 *tp = wide_int_to_tree (tem, t);
2648 else
2649 TREE_TYPE (t) = tem;
2653 break;
2656 return NULL_TREE;
2659 /* Return true if FNDECL is a setjmp or a longjmp. */
2661 static bool
2662 setjmp_or_longjmp_p (const_tree fndecl)
2664 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2665 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2666 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2667 return true;
2669 tree declname = DECL_NAME (fndecl);
2670 if (!declname)
2671 return false;
2672 const char *name = IDENTIFIER_POINTER (declname);
2673 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2677 /* Helper function for scan_omp.
2679 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2680 the current statement in GSI. */
2682 static tree
2683 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2684 struct walk_stmt_info *wi)
2686 gimple stmt = gsi_stmt (*gsi);
2687 omp_context *ctx = (omp_context *) wi->info;
2689 if (gimple_has_location (stmt))
2690 input_location = gimple_location (stmt);
2692 /* Check the OpenMP nesting restrictions. */
2693 bool remove = false;
2694 if (is_gimple_omp (stmt))
2695 remove = !check_omp_nesting_restrictions (stmt, ctx);
2696 else if (is_gimple_call (stmt))
2698 tree fndecl = gimple_call_fndecl (stmt);
2699 if (fndecl)
2701 if (setjmp_or_longjmp_p (fndecl)
2702 && ctx
2703 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2704 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2706 remove = true;
2707 error_at (gimple_location (stmt),
2708 "setjmp/longjmp inside simd construct");
2710 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2711 switch (DECL_FUNCTION_CODE (fndecl))
2713 case BUILT_IN_GOMP_BARRIER:
2714 case BUILT_IN_GOMP_CANCEL:
2715 case BUILT_IN_GOMP_CANCELLATION_POINT:
2716 case BUILT_IN_GOMP_TASKYIELD:
2717 case BUILT_IN_GOMP_TASKWAIT:
2718 case BUILT_IN_GOMP_TASKGROUP_START:
2719 case BUILT_IN_GOMP_TASKGROUP_END:
2720 remove = !check_omp_nesting_restrictions (stmt, ctx);
2721 break;
2722 default:
2723 break;
2727 if (remove)
2729 stmt = gimple_build_nop ();
2730 gsi_replace (gsi, stmt, false);
2733 *handled_ops_p = true;
2735 switch (gimple_code (stmt))
2737 case GIMPLE_OMP_PARALLEL:
2738 taskreg_nesting_level++;
2739 scan_omp_parallel (gsi, ctx);
2740 taskreg_nesting_level--;
2741 break;
2743 case GIMPLE_OMP_TASK:
2744 taskreg_nesting_level++;
2745 scan_omp_task (gsi, ctx);
2746 taskreg_nesting_level--;
2747 break;
2749 case GIMPLE_OMP_FOR:
2750 scan_omp_for (stmt, ctx);
2751 break;
2753 case GIMPLE_OMP_SECTIONS:
2754 scan_omp_sections (stmt, ctx);
2755 break;
2757 case GIMPLE_OMP_SINGLE:
2758 scan_omp_single (stmt, ctx);
2759 break;
2761 case GIMPLE_OMP_SECTION:
2762 case GIMPLE_OMP_MASTER:
2763 case GIMPLE_OMP_TASKGROUP:
2764 case GIMPLE_OMP_ORDERED:
2765 case GIMPLE_OMP_CRITICAL:
2766 ctx = new_omp_context (stmt, ctx);
2767 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2768 break;
2770 case GIMPLE_OMP_TARGET:
2771 scan_omp_target (stmt, ctx);
2772 break;
2774 case GIMPLE_OMP_TEAMS:
2775 scan_omp_teams (stmt, ctx);
2776 break;
2778 case GIMPLE_BIND:
2780 tree var;
2782 *handled_ops_p = false;
2783 if (ctx)
2784 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2785 insert_decl_map (&ctx->cb, var, var);
2787 break;
2788 default:
2789 *handled_ops_p = false;
2790 break;
2793 return NULL_TREE;
2797 /* Scan all the statements starting at the current statement. CTX
2798 contains context information about the OpenMP directives and
2799 clauses found during the scan. */
2801 static void
2802 scan_omp (gimple_seq *body_p, omp_context *ctx)
2804 location_t saved_location;
2805 struct walk_stmt_info wi;
2807 memset (&wi, 0, sizeof (wi));
2808 wi.info = ctx;
2809 wi.want_locations = true;
2811 saved_location = input_location;
2812 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2813 input_location = saved_location;
2816 /* Re-gimplification and code generation routines. */
2818 /* Build a call to GOMP_barrier. */
2820 static gimple
2821 build_omp_barrier (tree lhs)
2823 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2824 : BUILT_IN_GOMP_BARRIER);
2825 gimple g = gimple_build_call (fndecl, 0);
2826 if (lhs)
2827 gimple_call_set_lhs (g, lhs);
2828 return g;
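/* Sketch of the calls this builds (libgomp entry points): without an
   LHS it emits "GOMP_barrier ();", while with an LHS it emits
   "lhs = GOMP_barrier_cancel ();" so the caller can branch on whether
   the region was cancelled.  */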
2831 /* If a context was created for STMT when it was scanned, return it. */
2833 static omp_context *
2834 maybe_lookup_ctx (gimple stmt)
2836 splay_tree_node n;
2837 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2838 return n ? (omp_context *) n->value : NULL;
2842 /* Find the mapping for DECL in CTX or the immediately enclosing
2843 context that has a mapping for DECL.
2845 If CTX is a nested parallel directive, we may have to use the decl
2846 mappings created in CTX's parent context. Suppose that we have the
2847 following parallel nesting (variable UIDs shown for clarity):
2849 iD.1562 = 0;
2850 #omp parallel shared(iD.1562) -> outer parallel
2851 iD.1562 = iD.1562 + 1;
2853 #omp parallel shared (iD.1562) -> inner parallel
2854 iD.1562 = iD.1562 - 1;
2856 Each parallel structure will create a distinct .omp_data_s structure
2857 for copying iD.1562 in/out of the directive:
2859 outer parallel .omp_data_s.1.i -> iD.1562
2860 inner parallel .omp_data_s.2.i -> iD.1562
2862 A shared variable mapping will produce a copy-out operation before
2863 the parallel directive and a copy-in operation after it. So, in
2864 this case we would have:
2866 iD.1562 = 0;
2867 .omp_data_o.1.i = iD.1562;
2868 #omp parallel shared(iD.1562) -> outer parallel
2869 .omp_data_i.1 = &.omp_data_o.1
2870 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2872 .omp_data_o.2.i = iD.1562; -> **
2873 #omp parallel shared(iD.1562) -> inner parallel
2874 .omp_data_i.2 = &.omp_data_o.2
2875 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2878 ** This is a problem. The symbol iD.1562 cannot be referenced
2879 inside the body of the outer parallel region. But since we are
2880 emitting this copy operation while expanding the inner parallel
2881 directive, we need to access the CTX structure of the outer
2882 parallel directive to get the correct mapping:
2884 .omp_data_o.2.i = .omp_data_i.1->i
2886 Since there may be other workshare or parallel directives enclosing
2887 the parallel directive, it may be necessary to walk up the context
2888 parent chain. This is not a problem in general because nested
2889 parallelism happens only rarely. */
2891 static tree
2892 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2894 tree t;
2895 omp_context *up;
2897 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2898 t = maybe_lookup_decl (decl, up);
2900 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2902 return t ? t : decl;
2906 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2907 in outer contexts. */
2909 static tree
2910 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2912 tree t = NULL;
2913 omp_context *up;
2915 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2916 t = maybe_lookup_decl (decl, up);
2918 return t ? t : decl;
2922 /* Construct the initialization value for reduction CLAUSE. */
2924 tree
2925 omp_reduction_init (tree clause, tree type)
2927 location_t loc = OMP_CLAUSE_LOCATION (clause);
2928 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2930 case PLUS_EXPR:
2931 case MINUS_EXPR:
2932 case BIT_IOR_EXPR:
2933 case BIT_XOR_EXPR:
2934 case TRUTH_OR_EXPR:
2935 case TRUTH_ORIF_EXPR:
2936 case TRUTH_XOR_EXPR:
2937 case NE_EXPR:
2938 return build_zero_cst (type);
2940 case MULT_EXPR:
2941 case TRUTH_AND_EXPR:
2942 case TRUTH_ANDIF_EXPR:
2943 case EQ_EXPR:
2944 return fold_convert_loc (loc, type, integer_one_node);
2946 case BIT_AND_EXPR:
2947 return fold_convert_loc (loc, type, integer_minus_one_node);
2949 case MAX_EXPR:
2950 if (SCALAR_FLOAT_TYPE_P (type))
2952 REAL_VALUE_TYPE max, min;
2953 if (HONOR_INFINITIES (TYPE_MODE (type)))
2955 real_inf (&max);
2956 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2958 else
2959 real_maxval (&min, 1, TYPE_MODE (type));
2960 return build_real (type, min);
2962 else
2964 gcc_assert (INTEGRAL_TYPE_P (type));
2965 return TYPE_MIN_VALUE (type);
2968 case MIN_EXPR:
2969 if (SCALAR_FLOAT_TYPE_P (type))
2971 REAL_VALUE_TYPE max;
2972 if (HONOR_INFINITIES (TYPE_MODE (type)))
2973 real_inf (&max);
2974 else
2975 real_maxval (&max, 0, TYPE_MODE (type));
2976 return build_real (type, max);
2978 else
2980 gcc_assert (INTEGRAL_TYPE_P (type));
2981 return TYPE_MAX_VALUE (type);
2984 default:
2985 gcc_unreachable ();
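/* A few illustrative identity values (a sketch, assuming 32-bit int
   and IEEE float with infinities honored):

     reduction(+:x)    int    -> 0
     reduction(*:x)    int    -> 1
     reduction(&:x)    int    -> -1 (all bits set)
     reduction(max:x)  int    -> INT_MIN
     reduction(max:x)  float  -> -inf
     reduction(min:x)  float  -> +inf  */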
2989 /* Return alignment to be assumed for var in CLAUSE, which should be
2990 OMP_CLAUSE_ALIGNED. */
2992 static tree
2993 omp_clause_aligned_alignment (tree clause)
2995 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
2996 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
2998 /* Otherwise return the implementation-defined alignment. */
2999 unsigned int al = 1;
3000 enum machine_mode mode, vmode;
3001 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3002 if (vs)
3003 vs = 1 << floor_log2 (vs);
3004 static enum mode_class classes[]
3005 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3006 for (int i = 0; i < 4; i += 2)
3007 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3008 mode != VOIDmode;
3009 mode = GET_MODE_WIDER_MODE (mode))
3011 vmode = targetm.vectorize.preferred_simd_mode (mode);
3012 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3013 continue;
3014 while (vs
3015 && GET_MODE_SIZE (vmode) < vs
3016 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3017 vmode = GET_MODE_2XWIDER_MODE (vmode);
3019 tree type = lang_hooks.types.type_for_mode (mode, 1);
3020 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3021 continue;
3022 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3023 / GET_MODE_SIZE (mode));
3024 if (TYPE_MODE (type) != vmode)
3025 continue;
3026 if (TYPE_ALIGN_UNIT (type) > al)
3027 al = TYPE_ALIGN_UNIT (type);
3029 return build_int_cst (integer_type_node, al);
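/* For example, "#pragma omp simd aligned(p:64)" simply yields 64.
   Without an explicit alignment the result is target dependent; on an
   AVX-capable x86_64 the widest preferred vector type is 32 bytes, so
   the assumed alignment would be 32 (an illustrative sketch, not a
   guarantee for any particular target).  */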
3032 /* Return maximum possible vectorization factor for the target. */
3034 static int
3035 omp_max_vf (void)
3037 if (!optimize
3038 || optimize_debug
3039 || !flag_tree_loop_optimize
3040 || (!flag_tree_loop_vectorize
3041 && (global_options_set.x_flag_tree_loop_vectorize
3042 || global_options_set.x_flag_tree_vectorize)))
3043 return 1;
3045 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3046 if (vs)
3048 vs = 1 << floor_log2 (vs);
3049 return vs;
3051 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3052 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3053 return GET_MODE_NUNITS (vqimode);
3054 return 1;
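/* E.g. on x86_64 with AVX enabled, autovectorize_vector_sizes ()
   returns a mask including 32 and 16, so omp_max_vf () would be
   1 << floor_log2 (48) == 32; at -O0 or with loop vectorization
   disabled it is 1 (a sketch of target-dependent behavior).  */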
3057 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3058 privatization. */
3060 static bool
3061 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3062 tree &idx, tree &lane, tree &ivar, tree &lvar)
3064 if (max_vf == 0)
3066 max_vf = omp_max_vf ();
3067 if (max_vf > 1)
3069 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3070 OMP_CLAUSE_SAFELEN);
3071 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3072 max_vf = 1;
3073 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3074 max_vf) == -1)
3075 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3077 if (max_vf > 1)
3079 idx = create_tmp_var (unsigned_type_node, NULL);
3080 lane = create_tmp_var (unsigned_type_node, NULL);
3083 if (max_vf == 1)
3084 return false;
3086 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3087 tree avar = create_tmp_var_raw (atype, NULL);
3088 if (TREE_ADDRESSABLE (new_var))
3089 TREE_ADDRESSABLE (avar) = 1;
3090 DECL_ATTRIBUTES (avar)
3091 = tree_cons (get_identifier ("omp simd array"), NULL,
3092 DECL_ATTRIBUTES (avar));
3093 gimple_add_tmp_var (avar);
3094 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3095 NULL_TREE, NULL_TREE);
3096 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3097 NULL_TREE, NULL_TREE);
3098 if (DECL_P (new_var))
3100 SET_DECL_VALUE_EXPR (new_var, lvar);
3101 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3103 return true;
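/* The effect, sketched: a privatized scalar "x" in a simd loop gets
   an "omp simd array"

     x_arr[max_vf];

   with IVAR = x_arr[idx] used inside the loop body (one element per
   SIMD lane) and LVAR = x_arr[lane] standing in for x via its
   DECL_VALUE_EXPR (names here are illustrative).  */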
3106 /* Helper function of lower_rec_input_clauses. For a reference
3107 in a simd reduction, create an underlying variable for it to reference. */
3109 static void
3110 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3112 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3113 if (TREE_CONSTANT (z))
3115 const char *name = NULL;
3116 if (DECL_NAME (new_vard))
3117 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3119 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3120 gimple_add_tmp_var (z);
3121 TREE_ADDRESSABLE (z) = 1;
3122 z = build_fold_addr_expr_loc (loc, z);
3123 gimplify_assign (new_vard, z, ilist);
3127 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3128 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3129 private variables. Initialization statements go in ILIST, while calls
3130 to destructors go in DLIST. */
3132 static void
3133 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3134 omp_context *ctx, struct omp_for_data *fd)
3136 tree c, dtor, copyin_seq, x, ptr;
3137 bool copyin_by_ref = false;
3138 bool lastprivate_firstprivate = false;
3139 bool reduction_omp_orig_ref = false;
3140 int pass;
3141 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3142 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3143 int max_vf = 0;
3144 tree lane = NULL_TREE, idx = NULL_TREE;
3145 tree ivar = NULL_TREE, lvar = NULL_TREE;
3146 gimple_seq llist[2] = { NULL, NULL };
3148 copyin_seq = NULL;
3150 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3151 with data sharing clauses referencing variable sized vars. That
3152 is unnecessarily hard to support and very unlikely to result in
3153 vectorized code anyway. */
3154 if (is_simd)
3155 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3156 switch (OMP_CLAUSE_CODE (c))
3158 case OMP_CLAUSE_LINEAR:
3159 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3160 max_vf = 1;
3161 /* FALLTHRU */
3162 case OMP_CLAUSE_REDUCTION:
3163 case OMP_CLAUSE_PRIVATE:
3164 case OMP_CLAUSE_FIRSTPRIVATE:
3165 case OMP_CLAUSE_LASTPRIVATE:
3166 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3167 max_vf = 1;
3168 break;
3169 default:
3170 continue;
3173 /* Do all the fixed sized types in the first pass, and the variable sized
3174 types in the second pass. This makes sure that the scalar arguments to
3175 the variable sized types are processed before we use them in the
3176 variable sized operations. */
3177 for (pass = 0; pass < 2; ++pass)
3179 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3181 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3182 tree var, new_var;
3183 bool by_ref;
3184 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3186 switch (c_kind)
3188 case OMP_CLAUSE_PRIVATE:
3189 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3190 continue;
3191 break;
3192 case OMP_CLAUSE_SHARED:
3193 /* Ignore shared directives in teams construct. */
3194 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3195 continue;
3196 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3198 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3199 continue;
3201 case OMP_CLAUSE_FIRSTPRIVATE:
3202 case OMP_CLAUSE_COPYIN:
3203 case OMP_CLAUSE_LINEAR:
3204 break;
3205 case OMP_CLAUSE_REDUCTION:
3206 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3207 reduction_omp_orig_ref = true;
3208 break;
3209 case OMP_CLAUSE__LOOPTEMP_:
3210 /* Handle _looptemp_ clauses only on parallel. */
3211 if (fd)
3212 continue;
3213 break;
3214 case OMP_CLAUSE_LASTPRIVATE:
3215 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3217 lastprivate_firstprivate = true;
3218 if (pass != 0)
3219 continue;
3221 /* Even without a corresponding firstprivate, if the
3222 decl is a Fortran allocatable, it needs an outer var
3223 reference. */
3224 else if (pass == 0
3225 && lang_hooks.decls.omp_private_outer_ref
3226 (OMP_CLAUSE_DECL (c)))
3227 lastprivate_firstprivate = true;
3228 break;
3229 case OMP_CLAUSE_ALIGNED:
3230 if (pass == 0)
3231 continue;
3232 var = OMP_CLAUSE_DECL (c);
3233 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3234 && !is_global_var (var))
3236 new_var = maybe_lookup_decl (var, ctx);
3237 if (new_var == NULL_TREE)
3238 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3239 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3240 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3241 omp_clause_aligned_alignment (c));
3242 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3243 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3244 gimplify_and_add (x, ilist);
3246 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3247 && is_global_var (var))
3249 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3250 new_var = lookup_decl (var, ctx);
3251 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3252 t = build_fold_addr_expr_loc (clause_loc, t);
3253 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3254 t = build_call_expr_loc (clause_loc, t2, 2, t,
3255 omp_clause_aligned_alignment (c));
3256 t = fold_convert_loc (clause_loc, ptype, t);
3257 x = create_tmp_var (ptype, NULL);
3258 t = build2 (MODIFY_EXPR, ptype, x, t);
3259 gimplify_and_add (t, ilist);
3260 t = build_simple_mem_ref_loc (clause_loc, x);
3261 SET_DECL_VALUE_EXPR (new_var, t);
3262 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3264 continue;
3265 default:
3266 continue;
3269 new_var = var = OMP_CLAUSE_DECL (c);
3270 if (c_kind != OMP_CLAUSE_COPYIN)
3271 new_var = lookup_decl (var, ctx);
3273 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3275 if (pass != 0)
3276 continue;
3278 else if (is_variable_sized (var))
3280 /* For variable sized types, we need to allocate the
3281 actual storage here. Call alloca and store the
3282 result in the pointer decl that we created elsewhere. */
3283 if (pass == 0)
3284 continue;
3286 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3288 gimple stmt;
3289 tree tmp, atmp;
3291 ptr = DECL_VALUE_EXPR (new_var);
3292 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3293 ptr = TREE_OPERAND (ptr, 0);
3294 gcc_assert (DECL_P (ptr));
3295 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3297 /* void *tmp = __builtin_alloca (x); */
3298 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3299 stmt = gimple_build_call (atmp, 1, x);
3300 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3301 gimple_add_tmp_var (tmp);
3302 gimple_call_set_lhs (stmt, tmp);
3304 gimple_seq_add_stmt (ilist, stmt);
3306 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3307 gimplify_assign (ptr, x, ilist);
3310 else if (is_reference (var))
3312 /* For references that are being privatized for Fortran,
3313 allocate new backing storage for the new pointer
3314 variable. This allows us to avoid changing all the
3315 code that expects a pointer to something that expects
3316 a direct variable. */
3317 if (pass == 0)
3318 continue;
3320 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3321 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3323 x = build_receiver_ref (var, false, ctx);
3324 x = build_fold_addr_expr_loc (clause_loc, x);
3326 else if (TREE_CONSTANT (x))
3328 /* For a reduction in a SIMD loop, defer adding the
3329 initialization of the reference, because if we decide
3330 to use a SIMD array for it, the initialization could
3331 cause an expansion ICE. */
3332 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3333 x = NULL_TREE;
3334 else
3336 const char *name = NULL;
3337 if (DECL_NAME (var))
3338 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3340 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3341 name);
3342 gimple_add_tmp_var (x);
3343 TREE_ADDRESSABLE (x) = 1;
3344 x = build_fold_addr_expr_loc (clause_loc, x);
3347 else
3349 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3350 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3353 if (x)
3355 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3356 gimplify_assign (new_var, x, ilist);
3359 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3361 else if (c_kind == OMP_CLAUSE_REDUCTION
3362 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3364 if (pass == 0)
3365 continue;
3367 else if (pass != 0)
3368 continue;
3370 switch (OMP_CLAUSE_CODE (c))
3372 case OMP_CLAUSE_SHARED:
3373 /* Ignore shared directives in teams construct. */
3374 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3375 continue;
3376 /* Shared global vars are just accessed directly. */
3377 if (is_global_var (new_var))
3378 break;
3379 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3380 needs to be delayed until after fixup_child_record_type so
3381 that we get the correct type during the dereference. */
3382 by_ref = use_pointer_for_field (var, ctx);
3383 x = build_receiver_ref (var, by_ref, ctx);
3384 SET_DECL_VALUE_EXPR (new_var, x);
3385 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3387 /* ??? If VAR is not passed by reference, and the variable
3388 hasn't been initialized yet, then we'll get a warning for
3389 the store into the omp_data_s structure. Ideally, we'd be
3390 able to notice this and not store anything at all, but
3391 we're generating code too early. Suppress the warning. */
3392 if (!by_ref)
3393 TREE_NO_WARNING (var) = 1;
3394 break;
3396 case OMP_CLAUSE_LASTPRIVATE:
3397 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3398 break;
3399 /* FALLTHRU */
3401 case OMP_CLAUSE_PRIVATE:
3402 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3403 x = build_outer_var_ref (var, ctx);
3404 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3406 if (is_task_ctx (ctx))
3407 x = build_receiver_ref (var, false, ctx);
3408 else
3409 x = build_outer_var_ref (var, ctx);
3411 else
3412 x = NULL;
3413 do_private:
3414 tree nx;
3415 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3416 if (is_simd)
3418 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3419 if ((TREE_ADDRESSABLE (new_var) || nx || y
3420 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3421 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3422 idx, lane, ivar, lvar))
3424 if (nx)
3425 x = lang_hooks.decls.omp_clause_default_ctor
3426 (c, unshare_expr (ivar), x);
3427 if (nx && x)
3428 gimplify_and_add (x, &llist[0]);
3429 if (y)
3431 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3432 if (y)
3434 gimple_seq tseq = NULL;
3436 dtor = y;
3437 gimplify_stmt (&dtor, &tseq);
3438 gimple_seq_add_seq (&llist[1], tseq);
3441 break;
3444 if (nx)
3445 gimplify_and_add (nx, ilist);
3446 /* FALLTHRU */
3448 do_dtor:
3449 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3450 if (x)
3452 gimple_seq tseq = NULL;
3454 dtor = x;
3455 gimplify_stmt (&dtor, &tseq);
3456 gimple_seq_add_seq (dlist, tseq);
3458 break;
3460 case OMP_CLAUSE_LINEAR:
3461 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3462 goto do_firstprivate;
3463 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3464 x = NULL;
3465 else
3466 x = build_outer_var_ref (var, ctx);
3467 goto do_private;
3469 case OMP_CLAUSE_FIRSTPRIVATE:
3470 if (is_task_ctx (ctx))
3472 if (is_reference (var) || is_variable_sized (var))
3473 goto do_dtor;
3474 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3475 ctx))
3476 || use_pointer_for_field (var, NULL))
3478 x = build_receiver_ref (var, false, ctx);
3479 SET_DECL_VALUE_EXPR (new_var, x);
3480 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3481 goto do_dtor;
3484 do_firstprivate:
3485 x = build_outer_var_ref (var, ctx);
3486 if (is_simd)
3488 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3489 && gimple_omp_for_combined_into_p (ctx->stmt))
3491 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3492 tree stept = TREE_TYPE (t);
3493 tree ct = find_omp_clause (clauses,
3494 OMP_CLAUSE__LOOPTEMP_);
3495 gcc_assert (ct);
3496 tree l = OMP_CLAUSE_DECL (ct);
3497 tree n1 = fd->loop.n1;
3498 tree step = fd->loop.step;
3499 tree itype = TREE_TYPE (l);
3500 if (POINTER_TYPE_P (itype))
3501 itype = signed_type_for (itype);
3502 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3503 if (TYPE_UNSIGNED (itype)
3504 && fd->loop.cond_code == GT_EXPR)
3505 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3506 fold_build1 (NEGATE_EXPR, itype, l),
3507 fold_build1 (NEGATE_EXPR,
3508 itype, step));
3509 else
3510 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3511 t = fold_build2 (MULT_EXPR, stept,
3512 fold_convert (stept, l), t);
3514 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3516 x = lang_hooks.decls.omp_clause_linear_ctor
3517 (c, new_var, x, t);
3518 gimplify_and_add (x, ilist);
3519 goto do_dtor;
3522 if (POINTER_TYPE_P (TREE_TYPE (x)))
3523 x = fold_build2 (POINTER_PLUS_EXPR,
3524 TREE_TYPE (x), x, t);
3525 else
3526 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3529 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3530 || TREE_ADDRESSABLE (new_var))
3531 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3532 idx, lane, ivar, lvar))
3534 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3536 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3537 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3538 gimplify_and_add (x, ilist);
3539 gimple_stmt_iterator gsi
3540 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3541 gimple g
3542 = gimple_build_assign (unshare_expr (lvar), iv);
3543 gsi_insert_before_without_update (&gsi, g,
3544 GSI_SAME_STMT);
3545 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3546 enum tree_code code = PLUS_EXPR;
3547 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3548 code = POINTER_PLUS_EXPR;
3549 g = gimple_build_assign_with_ops (code, iv, iv, t);
3550 gsi_insert_before_without_update (&gsi, g,
3551 GSI_SAME_STMT);
3552 break;
3554 x = lang_hooks.decls.omp_clause_copy_ctor
3555 (c, unshare_expr (ivar), x);
3556 gimplify_and_add (x, &llist[0]);
3557 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3558 if (x)
3560 gimple_seq tseq = NULL;
3562 dtor = x;
3563 gimplify_stmt (&dtor, &tseq);
3564 gimple_seq_add_seq (&llist[1], tseq);
3566 break;
3569 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3570 gimplify_and_add (x, ilist);
3571 goto do_dtor;
3573 case OMP_CLAUSE__LOOPTEMP_:
3574 gcc_assert (is_parallel_ctx (ctx));
3575 x = build_outer_var_ref (var, ctx);
3576 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3577 gimplify_and_add (x, ilist);
3578 break;
3580 case OMP_CLAUSE_COPYIN:
3581 by_ref = use_pointer_for_field (var, NULL);
3582 x = build_receiver_ref (var, by_ref, ctx);
3583 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3584 append_to_statement_list (x, &copyin_seq);
3585 copyin_by_ref |= by_ref;
3586 break;
3588 case OMP_CLAUSE_REDUCTION:
3589 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3591 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3592 gimple tseq;
3593 x = build_outer_var_ref (var, ctx);
3595 if (is_reference (var)
3596 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3597 TREE_TYPE (x)))
3598 x = build_fold_addr_expr_loc (clause_loc, x);
3599 SET_DECL_VALUE_EXPR (placeholder, x);
3600 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3601 tree new_vard = new_var;
3602 if (is_reference (var))
3604 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3605 new_vard = TREE_OPERAND (new_var, 0);
3606 gcc_assert (DECL_P (new_vard));
3608 if (is_simd
3609 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3610 idx, lane, ivar, lvar))
3612 if (new_vard == new_var)
3614 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3615 SET_DECL_VALUE_EXPR (new_var, ivar);
3617 else
3619 SET_DECL_VALUE_EXPR (new_vard,
3620 build_fold_addr_expr (ivar));
3621 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3623 x = lang_hooks.decls.omp_clause_default_ctor
3624 (c, unshare_expr (ivar),
3625 build_outer_var_ref (var, ctx));
3626 if (x)
3627 gimplify_and_add (x, &llist[0]);
3628 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3630 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3631 lower_omp (&tseq, ctx);
3632 gimple_seq_add_seq (&llist[0], tseq);
3634 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3635 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3636 lower_omp (&tseq, ctx);
3637 gimple_seq_add_seq (&llist[1], tseq);
3638 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3639 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3640 if (new_vard == new_var)
3641 SET_DECL_VALUE_EXPR (new_var, lvar);
3642 else
3643 SET_DECL_VALUE_EXPR (new_vard,
3644 build_fold_addr_expr (lvar));
3645 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3646 if (x)
3648 tseq = NULL;
3649 dtor = x;
3650 gimplify_stmt (&dtor, &tseq);
3651 gimple_seq_add_seq (&llist[1], tseq);
3653 break;
3655 /* If this is a reference to a constant-size reduction var
3656 with a placeholder, we haven't emitted the initializer
3657 for it because that is undesirable if SIMD arrays are used.
3658 But if they aren't used, we need to emit the deferred
3659 initialization now. */
3660 else if (is_reference (var) && is_simd)
3661 handle_simd_reference (clause_loc, new_vard, ilist);
3662 x = lang_hooks.decls.omp_clause_default_ctor
3663 (c, unshare_expr (new_var),
3664 build_outer_var_ref (var, ctx));
3665 if (x)
3666 gimplify_and_add (x, ilist);
3667 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3669 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3670 lower_omp (&tseq, ctx);
3671 gimple_seq_add_seq (ilist, tseq);
3673 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3674 if (is_simd)
3676 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3677 lower_omp (&tseq, ctx);
3678 gimple_seq_add_seq (dlist, tseq);
3679 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3681 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3682 goto do_dtor;
3684 else
3686 x = omp_reduction_init (c, TREE_TYPE (new_var));
3687 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3688 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3690 /* reduction(-:var) sums up the partial results, so it
3691 acts identically to reduction(+:var). */
3692 if (code == MINUS_EXPR)
3693 code = PLUS_EXPR;
3695 tree new_vard = new_var;
3696 if (is_simd && is_reference (var))
3698 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3699 new_vard = TREE_OPERAND (new_var, 0);
3700 gcc_assert (DECL_P (new_vard));
3702 if (is_simd
3703 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3704 idx, lane, ivar, lvar))
3706 tree ref = build_outer_var_ref (var, ctx);
3708 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3710 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3711 ref = build_outer_var_ref (var, ctx);
3712 gimplify_assign (ref, x, &llist[1]);
3714 if (new_vard != new_var)
3716 SET_DECL_VALUE_EXPR (new_vard,
3717 build_fold_addr_expr (lvar));
3718 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3721 else
3723 if (is_reference (var) && is_simd)
3724 handle_simd_reference (clause_loc, new_vard, ilist);
3725 gimplify_assign (new_var, x, ilist);
3726 if (is_simd)
3728 tree ref = build_outer_var_ref (var, ctx);
3730 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3731 ref = build_outer_var_ref (var, ctx);
3732 gimplify_assign (ref, x, dlist);
3736 break;
3738 default:
3739 gcc_unreachable ();
3744 if (lane)
3746 tree uid = create_tmp_var (ptr_type_node, "simduid");
3747 /* We don't want uninitialized-use warnings on simduid: it is always
3748 uninitialized, but we use it only for its DECL_UID, never for its value. */
3749 TREE_NO_WARNING (uid) = 1;
3750 gimple g
3751 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3752 gimple_call_set_lhs (g, lane);
3753 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3754 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3755 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3756 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3757 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3758 gimple_omp_for_set_clauses (ctx->stmt, c);
3759 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3760 build_int_cst (unsigned_type_node, 0),
3761 NULL_TREE);
3762 gimple_seq_add_stmt (ilist, g);
3763 for (int i = 0; i < 2; i++)
3764 if (llist[i])
3766 tree vf = create_tmp_var (unsigned_type_node, NULL);
3767 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3768 gimple_call_set_lhs (g, vf);
3769 gimple_seq *seq = i == 0 ? ilist : dlist;
3770 gimple_seq_add_stmt (seq, g);
3771 tree t = build_int_cst (unsigned_type_node, 0);
3772 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3773 gimple_seq_add_stmt (seq, g);
3774 tree body = create_artificial_label (UNKNOWN_LOCATION);
3775 tree header = create_artificial_label (UNKNOWN_LOCATION);
3776 tree end = create_artificial_label (UNKNOWN_LOCATION);
3777 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3778 gimple_seq_add_stmt (seq, gimple_build_label (body));
3779 gimple_seq_add_seq (seq, llist[i]);
3780 t = build_int_cst (unsigned_type_node, 1);
3781 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3782 gimple_seq_add_stmt (seq, g);
3783 gimple_seq_add_stmt (seq, gimple_build_label (header));
3784 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3785 gimple_seq_add_stmt (seq, g);
3786 gimple_seq_add_stmt (seq, gimple_build_label (end));
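/* The loop emitted above for each non-empty LLIST has this shape
   (a sketch in C-like pseudo code; vf comes from GOMP_SIMD_VF):

     idx = 0;
     goto header;
   body:
     ... llist[i] statements, operating on x_arr[idx] ...
     idx = idx + 1;
   header:
     if (idx < vf) goto body; else goto end;
   end:;  */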
3790 /* The copyin sequence is not to be executed by the main thread, since
3791 that would result in self-copies. These might not be visible for
3792 scalars, but they certainly are to C++ operator=. */
3793 if (copyin_seq)
3795 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3797 x = build2 (NE_EXPR, boolean_type_node, x,
3798 build_int_cst (TREE_TYPE (x), 0));
3799 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3800 gimplify_and_add (x, ilist);
3803 /* If any copyin variable is passed by reference, we must ensure the
3804 master thread doesn't modify it before it is copied over in all
3805 threads. Similarly for variables in both firstprivate and
3806 lastprivate clauses we need to ensure the lastprivate copying
3807 happens after firstprivate copying in all threads. And similarly
3808 for UDRs if initializer expression refers to omp_orig. */
3809 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3811 /* Don't add any barrier for #pragma omp simd or
3812 #pragma omp distribute. */
3813 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3814 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3815 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3818 /* If max_vf is non-zero, then we can use only a vectorization factor
3819 up to the max_vf we chose. So stick it into the safelen clause. */
3820 if (max_vf)
3822 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3823 OMP_CLAUSE_SAFELEN);
3824 if (c == NULL_TREE
3825 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
3826 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3827 max_vf) == 1))
3829 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3830 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3831 max_vf);
3832 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3833 gimple_omp_for_set_clauses (ctx->stmt, c);
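/* For instance, if max_vf ended up 16 and the loop had safelen(64)
   (or no safelen clause at all), a new safelen(16) clause is
   prepended so the vectorizer never assumes more than 16 concurrent
   lanes (illustrative numbers).  */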
3839 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3840 both parallel and workshare constructs. PREDICATE may be NULL if it's
3841 always true. */
3843 static void
3844 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3845 omp_context *ctx)
3847 tree x, c, label = NULL, orig_clauses = clauses;
3848 bool par_clauses = false;
3849 tree simduid = NULL, lastlane = NULL;
3851 /* Early exit if there are no lastprivate or linear clauses. */
3852 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3853 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3854 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3855 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3856 break;
3857 if (clauses == NULL)
3859 /* If this was a workshare clause, see if it had been combined
3860 with its parallel. In that case, look for the clauses on the
3861 parallel statement itself. */
3862 if (is_parallel_ctx (ctx))
3863 return;
3865 ctx = ctx->outer;
3866 if (ctx == NULL || !is_parallel_ctx (ctx))
3867 return;
3869 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3870 OMP_CLAUSE_LASTPRIVATE);
3871 if (clauses == NULL)
3872 return;
3873 par_clauses = true;
3876 if (predicate)
3878 gimple stmt;
3879 tree label_true, arm1, arm2;
3881 label = create_artificial_label (UNKNOWN_LOCATION);
3882 label_true = create_artificial_label (UNKNOWN_LOCATION);
3883 arm1 = TREE_OPERAND (predicate, 0);
3884 arm2 = TREE_OPERAND (predicate, 1);
3885 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3886 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3887 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3888 label_true, label);
3889 gimple_seq_add_stmt (stmt_list, stmt);
3890 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3893 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3894 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3896 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3897 if (simduid)
3898 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3901 for (c = clauses; c ;)
3903 tree var, new_var;
3904 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3906 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3907 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3908 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3910 var = OMP_CLAUSE_DECL (c);
3911 new_var = lookup_decl (var, ctx);
3913 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3915 tree val = DECL_VALUE_EXPR (new_var);
3916 if (TREE_CODE (val) == ARRAY_REF
3917 && VAR_P (TREE_OPERAND (val, 0))
3918 && lookup_attribute ("omp simd array",
3919 DECL_ATTRIBUTES (TREE_OPERAND (val,
3920 0))))
3922 if (lastlane == NULL)
3924 lastlane = create_tmp_var (unsigned_type_node, NULL);
3925 gimple g
3926 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3927 2, simduid,
3928 TREE_OPERAND (val, 1));
3929 gimple_call_set_lhs (g, lastlane);
3930 gimple_seq_add_stmt (stmt_list, g);
3932 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3933 TREE_OPERAND (val, 0), lastlane,
3934 NULL_TREE, NULL_TREE);
3938 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3939 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
3941 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
3942 gimple_seq_add_seq (stmt_list,
3943 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
3944 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
3946 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3947 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
3949 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
3950 gimple_seq_add_seq (stmt_list,
3951 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
3952 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
3955 x = build_outer_var_ref (var, ctx);
3956 if (is_reference (var))
3957 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3958 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
3959 gimplify_and_add (x, stmt_list);
3961 c = OMP_CLAUSE_CHAIN (c);
3962 if (c == NULL && !par_clauses)
3964 /* If this was a workshare clause, see if it had been combined
3965 with its parallel. In that case, continue looking for the
3966 clauses also on the parallel statement itself. */
3967 if (is_parallel_ctx (ctx))
3968 break;
3970 ctx = ctx->outer;
3971 if (ctx == NULL || !is_parallel_ctx (ctx))
3972 break;
3974 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3975 OMP_CLAUSE_LASTPRIVATE);
3976 par_clauses = true;
3980 if (label)
3981 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
3985 /* Generate code to implement the REDUCTION clauses. */
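/* Illustrative sketch, not taken from this file: for

     #pragma omp parallel for reduction (+:s)

   a single non-UDR reduction clause is merged back with an atomic
   update, conceptually

     #pragma omp atomic
     s += s_priv;

   whereas several clauses, array reductions or UDRs are merged inside
   a GOMP_atomic_start ()/GOMP_atomic_end () locked region, matching
   the COUNT logic below (s_priv names the private copy and is
   invented here for exposition).  */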
3987 static void
3988 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
3990 gimple_seq sub_seq = NULL;
3991 gimple stmt;
3992 tree x, c;
3993 int count = 0;
3995 /* SIMD reductions are handled in lower_rec_input_clauses. */
3996 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3997 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3998 return;
4000 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4001 update in that case, otherwise use a lock. */
4002 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
4003 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
4005 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4007 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4008 count = -1;
4009 break;
4011 count++;
4014 if (count == 0)
4015 return;
4017 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4019 tree var, ref, new_var;
4020 enum tree_code code;
4021 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4023 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
4024 continue;
4026 var = OMP_CLAUSE_DECL (c);
4027 new_var = lookup_decl (var, ctx);
4028 if (is_reference (var))
4029 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4030 ref = build_outer_var_ref (var, ctx);
4031 code = OMP_CLAUSE_REDUCTION_CODE (c);
4033 /* reduction(-:var) sums up the partial results, so it acts
4034 identically to reduction(+:var). */
4035 if (code == MINUS_EXPR)
4036 code = PLUS_EXPR;
4038 if (count == 1)
4040 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
4042 addr = save_expr (addr);
4043 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
4044 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
4045 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
4046 gimplify_and_add (x, stmt_seqp);
4047 return;
4050 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4052 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4054 if (is_reference (var)
4055 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4056 TREE_TYPE (ref)))
4057 ref = build_fold_addr_expr_loc (clause_loc, ref);
4058 SET_DECL_VALUE_EXPR (placeholder, ref);
4059 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4060 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
4061 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
4062 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4063 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
4065 else
4067 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4068 ref = build_outer_var_ref (var, ctx);
4069 gimplify_assign (ref, x, &sub_seq);
4073 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4074 0);
4075 gimple_seq_add_stmt (stmt_seqp, stmt);
4077 gimple_seq_add_seq (stmt_seqp, sub_seq);
4079 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4080 0);
4081 gimple_seq_add_stmt (stmt_seqp, stmt);
4085 /* Generate code to implement the COPYPRIVATE clauses. */
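/* Illustrative sketch, not taken from this file: for

     #pragma omp single copyprivate (x)

   the thread that executed the single region publishes its value in
   SLIST and every other thread reads it back in RLIST, conceptually

     .omp_copy_o.x = x;      // sender side; &x when passed by reference
     ...
     x = .omp_copy_i->x;     // receiver side

   with the record and field names invented here for exposition.  */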
4087 static void
4088 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4089 omp_context *ctx)
4091 tree c;
4093 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4095 tree var, new_var, ref, x;
4096 bool by_ref;
4097 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4099 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4100 continue;
4102 var = OMP_CLAUSE_DECL (c);
4103 by_ref = use_pointer_for_field (var, NULL);
4105 ref = build_sender_ref (var, ctx);
4106 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4107 if (by_ref)
4109 x = build_fold_addr_expr_loc (clause_loc, new_var);
4110 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4112 gimplify_assign (ref, x, slist);
4114 ref = build_receiver_ref (var, false, ctx);
4115 if (by_ref)
4117 ref = fold_convert_loc (clause_loc,
4118 build_pointer_type (TREE_TYPE (new_var)),
4119 ref);
4120 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4122 if (is_reference (var))
4124 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4125 ref = build_simple_mem_ref_loc (clause_loc, ref);
4126 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4128 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4129 gimplify_and_add (x, rlist);
4134 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE
4135 and REDUCTION clauses from the sender (aka parent) side. */
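/* Illustrative sketch, not taken from this file: for firstprivate (a)
   the parent stores the incoming value into the data block before the
   fork, and for lastprivate/reduction it reads the result back after
   the join, conceptually

     .omp_data_o.a = a;      // ilist, before the GOMP_* launch call
     ...
     a = .omp_data_o.a;      // olist, after the region

   (.omp_data_o is the artificial sender record; by-reference fields
   store &a instead).  */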
4137 static void
4138 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4139 omp_context *ctx)
4141 tree c;
4143 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4145 tree val, ref, x, var;
4146 bool by_ref, do_in = false, do_out = false;
4147 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4149 switch (OMP_CLAUSE_CODE (c))
4151 case OMP_CLAUSE_PRIVATE:
4152 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4153 break;
4154 continue;
4155 case OMP_CLAUSE_FIRSTPRIVATE:
4156 case OMP_CLAUSE_COPYIN:
4157 case OMP_CLAUSE_LASTPRIVATE:
4158 case OMP_CLAUSE_REDUCTION:
4159 case OMP_CLAUSE__LOOPTEMP_:
4160 break;
4161 default:
4162 continue;
4165 val = OMP_CLAUSE_DECL (c);
4166 var = lookup_decl_in_outer_ctx (val, ctx);
4168 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4169 && is_global_var (var))
4170 continue;
4171 if (is_variable_sized (val))
4172 continue;
4173 by_ref = use_pointer_for_field (val, NULL);
4175 switch (OMP_CLAUSE_CODE (c))
4177 case OMP_CLAUSE_PRIVATE:
4178 case OMP_CLAUSE_FIRSTPRIVATE:
4179 case OMP_CLAUSE_COPYIN:
4180 case OMP_CLAUSE__LOOPTEMP_:
4181 do_in = true;
4182 break;
4184 case OMP_CLAUSE_LASTPRIVATE:
4185 if (by_ref || is_reference (val))
4187 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4188 continue;
4189 do_in = true;
4191 else
4193 do_out = true;
4194 if (lang_hooks.decls.omp_private_outer_ref (val))
4195 do_in = true;
4197 break;
4199 case OMP_CLAUSE_REDUCTION:
4200 do_in = true;
4201 do_out = !(by_ref || is_reference (val));
4202 break;
4204 default:
4205 gcc_unreachable ();
4208 if (do_in)
4210 ref = build_sender_ref (val, ctx);
4211 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4212 gimplify_assign (ref, x, ilist);
4213 if (is_task_ctx (ctx))
4214 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4217 if (do_out)
4219 ref = build_sender_ref (val, ctx);
4220 gimplify_assign (var, ref, olist);
4225 /* Generate code to implement SHARED from the sender (aka parent)
4226 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4227 list things that got automatically shared. */
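/* Illustrative sketch, not taken from this file: a shared variable
   that the child must reach through the parent's frame is sent by
   address in the use_pointer_for_field case, conceptually

     .omp_data_o.x = &x;

   and otherwise by value, with a copy back out after the region
   whenever the child may have stored to it.  */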
4229 static void
4230 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4232 tree var, ovar, nvar, f, x, record_type;
4234 if (ctx->record_type == NULL)
4235 return;
4237 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4238 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4240 ovar = DECL_ABSTRACT_ORIGIN (f);
4241 nvar = maybe_lookup_decl (ovar, ctx);
4242 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4243 continue;
4245 /* If CTX is a nested parallel directive, find the immediately
4246 enclosing parallel or workshare construct that contains a
4247 mapping for OVAR. */
4248 var = lookup_decl_in_outer_ctx (ovar, ctx);
4250 if (use_pointer_for_field (ovar, ctx))
4252 x = build_sender_ref (ovar, ctx);
4253 var = build_fold_addr_expr (var);
4254 gimplify_assign (x, var, ilist);
4256 else
4258 x = build_sender_ref (ovar, ctx);
4259 gimplify_assign (x, var, ilist);
4261 if (!TREE_READONLY (var)
4262 /* We don't need to receive a new reference to a result
4263 or parm decl. In fact we may not store to it as we will
4264 invalidate any pending RSO and generate wrong gimple
4265 during inlining. */
4266 && !((TREE_CODE (var) == RESULT_DECL
4267 || TREE_CODE (var) == PARM_DECL)
4268 && DECL_BY_REFERENCE (var)))
4270 x = build_sender_ref (ovar, ctx);
4271 gimplify_assign (var, x, olist);
4278 /* A convenience function to build an empty GIMPLE_COND with just the
4279 condition. */
4281 static gimple
4282 gimple_build_cond_empty (tree cond)
4284 enum tree_code pred_code;
4285 tree lhs, rhs;
4287 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4288 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4292 /* Build the function calls to GOMP_parallel_start etc to actually
4293 generate the parallel operation. REGION is the parallel region
4294 being expanded. BB is the block where to insert the code. WS_ARGS
4295 will be set if this is a call to a combined parallel+workshare
4296 construct, it contains the list of additional arguments needed by
4297 the workshare construct. */
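/* Illustrative sketch, not taken from this file: for a plain parallel
   region the call built below amounts to

     GOMP_parallel (foo._omp_fn.0, &.omp_data_o, num_threads, flags);

   and for a combined parallel loop to one of the GOMP_parallel_loop_*
   entry points, with the extra workshare arguments from WS_ARGS
   spliced in just before FLAGS.  The child function name is the usual
   dump-style spelling, shown here only for exposition.  */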
4299 static void
4300 expand_parallel_call (struct omp_region *region, basic_block bb,
4301 gimple entry_stmt, vec<tree, va_gc> *ws_args)
4303 tree t, t1, t2, val, cond, c, clauses, flags;
4304 gimple_stmt_iterator gsi;
4305 gimple stmt;
4306 enum built_in_function start_ix;
4307 int start_ix2;
4308 location_t clause_loc;
4309 vec<tree, va_gc> *args;
4311 clauses = gimple_omp_parallel_clauses (entry_stmt);
4313 /* Determine what flavor of GOMP_parallel we will be
4314 emitting. */
4315 start_ix = BUILT_IN_GOMP_PARALLEL;
4316 if (is_combined_parallel (region))
4318 switch (region->inner->type)
4320 case GIMPLE_OMP_FOR:
4321 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4322 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4323 + (region->inner->sched_kind
4324 == OMP_CLAUSE_SCHEDULE_RUNTIME
4325 ? 3 : region->inner->sched_kind));
4326 start_ix = (enum built_in_function)start_ix2;
4327 break;
4328 case GIMPLE_OMP_SECTIONS:
4329 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4330 break;
4331 default:
4332 gcc_unreachable ();
4336 /* By default, the value of NUM_THREADS is zero (selected at run time)
4337 and there is no conditional. */
4338 cond = NULL_TREE;
4339 val = build_int_cst (unsigned_type_node, 0);
4340 flags = build_int_cst (unsigned_type_node, 0);
4342 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4343 if (c)
4344 cond = OMP_CLAUSE_IF_EXPR (c);
4346 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4347 if (c)
4349 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4350 clause_loc = OMP_CLAUSE_LOCATION (c);
4352 else
4353 clause_loc = gimple_location (entry_stmt);
4355 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4356 if (c)
4357 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4359 /* Ensure 'val' is of the correct type. */
4360 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4362 /* If we found the clause 'if (cond)', build either
4363 (cond != 0) or (cond ? val : 1u). */
4364 if (cond)
4366 cond = gimple_boolify (cond);
4368 if (integer_zerop (val))
4369 val = fold_build2_loc (clause_loc,
4370 EQ_EXPR, unsigned_type_node, cond,
4371 build_int_cst (TREE_TYPE (cond), 0));
4372 else
4374 basic_block cond_bb, then_bb, else_bb;
4375 edge e, e_then, e_else;
4376 tree tmp_then, tmp_else, tmp_join, tmp_var;
4378 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4379 if (gimple_in_ssa_p (cfun))
4381 tmp_then = make_ssa_name (tmp_var, NULL);
4382 tmp_else = make_ssa_name (tmp_var, NULL);
4383 tmp_join = make_ssa_name (tmp_var, NULL);
4385 else
4387 tmp_then = tmp_var;
4388 tmp_else = tmp_var;
4389 tmp_join = tmp_var;
4392 e = split_block (bb, NULL);
4393 cond_bb = e->src;
4394 bb = e->dest;
4395 remove_edge (e);
4397 then_bb = create_empty_bb (cond_bb);
4398 else_bb = create_empty_bb (then_bb);
4399 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4400 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4402 stmt = gimple_build_cond_empty (cond);
4403 gsi = gsi_start_bb (cond_bb);
4404 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4406 gsi = gsi_start_bb (then_bb);
4407 stmt = gimple_build_assign (tmp_then, val);
4408 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4410 gsi = gsi_start_bb (else_bb);
4411 stmt = gimple_build_assign
4412 (tmp_else, build_int_cst (unsigned_type_node, 1));
4413 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4415 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4416 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4417 add_bb_to_loop (then_bb, cond_bb->loop_father);
4418 add_bb_to_loop (else_bb, cond_bb->loop_father);
4419 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4420 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4422 if (gimple_in_ssa_p (cfun))
4424 gimple phi = create_phi_node (tmp_join, bb);
4425 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4426 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4429 val = tmp_join;
4432 gsi = gsi_start_bb (bb);
4433 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4434 false, GSI_CONTINUE_LINKING);
4437 gsi = gsi_last_bb (bb);
4438 t = gimple_omp_parallel_data_arg (entry_stmt);
4439 if (t == NULL)
4440 t1 = null_pointer_node;
4441 else
4442 t1 = build_fold_addr_expr (t);
4443 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4445 vec_alloc (args, 4 + vec_safe_length (ws_args));
4446 args->quick_push (t2);
4447 args->quick_push (t1);
4448 args->quick_push (val);
4449 if (ws_args)
4450 args->splice (*ws_args);
4451 args->quick_push (flags);
4453 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4454 builtin_decl_explicit (start_ix), args);
4456 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4457 false, GSI_CONTINUE_LINKING);
4460 /* Insert a call to the function named FUNC_NAME, passing it the
4461 information from ENTRY_STMT, into the basic_block BB. */
4463 static void
4464 expand_cilk_for_call (basic_block bb, gimple entry_stmt,
4465 vec <tree, va_gc> *ws_args)
4467 tree t, t1, t2;
4468 gimple_stmt_iterator gsi;
4469 vec <tree, va_gc> *args;
4471 gcc_assert (vec_safe_length (ws_args) == 2);
4472 tree func_name = (*ws_args)[0];
4473 tree grain = (*ws_args)[1];
4475 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
4476 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
4477 gcc_assert (count != NULL_TREE);
4478 count = OMP_CLAUSE_OPERAND (count, 0);
4480 gsi = gsi_last_bb (bb);
4481 t = gimple_omp_parallel_data_arg (entry_stmt);
4482 if (t == NULL)
4483 t1 = null_pointer_node;
4484 else
4485 t1 = build_fold_addr_expr (t);
4486 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4488 vec_alloc (args, 4);
4489 args->quick_push (t2);
4490 args->quick_push (t1);
4491 args->quick_push (count);
4492 args->quick_push (grain);
4493 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
4495 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
4496 GSI_CONTINUE_LINKING);
4499 /* Build the function call to GOMP_task that actually
4500 generates the task operation. BB is the block where the code is to be inserted. */
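/* Illustrative sketch, not taken from this file: the call built below
   has the shape

     GOMP_task (foo._omp_fn.0, &.omp_data_o, cpyfn, arg_size, arg_align,
                if_expr, flags, depend);

   where FLAGS packs untied (1), final (2), mergeable (4) and
   depend (8), as computed a few lines down.  */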
4502 static void
4503 expand_task_call (basic_block bb, gimple entry_stmt)
4505 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4506 gimple_stmt_iterator gsi;
4507 location_t loc = gimple_location (entry_stmt);
4509 clauses = gimple_omp_task_clauses (entry_stmt);
4511 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4512 if (c)
4513 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4514 else
4515 cond = boolean_true_node;
4517 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4518 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4519 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4520 flags = build_int_cst (unsigned_type_node,
4521 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4523 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4524 if (c)
4526 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4527 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4528 build_int_cst (unsigned_type_node, 2),
4529 build_int_cst (unsigned_type_node, 0));
4530 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4532 if (depend)
4533 depend = OMP_CLAUSE_DECL (depend);
4534 else
4535 depend = build_int_cst (ptr_type_node, 0);
4537 gsi = gsi_last_bb (bb);
4538 t = gimple_omp_task_data_arg (entry_stmt);
4539 if (t == NULL)
4540 t2 = null_pointer_node;
4541 else
4542 t2 = build_fold_addr_expr_loc (loc, t);
4543 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4544 t = gimple_omp_task_copy_fn (entry_stmt);
4545 if (t == NULL)
4546 t3 = null_pointer_node;
4547 else
4548 t3 = build_fold_addr_expr_loc (loc, t);
4550 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4551 8, t1, t2, t3,
4552 gimple_omp_task_arg_size (entry_stmt),
4553 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4554 depend);
4556 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4557 false, GSI_CONTINUE_LINKING);
4561 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4562 catch handler and return it. This prevents programs from violating the
4563 structured block semantics with throws. */
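/* Illustrative sketch, not taken from this file: the wrapper built
   below corresponds roughly to the C++ notion of

     try { BODY } catch (...) { __builtin_trap (); }

   except that the handler is an EH_MUST_NOT_THROW region, and the
   language may supply its own cleanup action instead of the trap.  */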
4565 static gimple_seq
4566 maybe_catch_exception (gimple_seq body)
4568 gimple g;
4569 tree decl;
4571 if (!flag_exceptions)
4572 return body;
4574 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4575 decl = lang_hooks.eh_protect_cleanup_actions ();
4576 else
4577 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4579 g = gimple_build_eh_must_not_throw (decl);
4580 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4581 GIMPLE_TRY_CATCH);
4583 return gimple_seq_alloc_with_stmt (g);
4586 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
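/* Illustrative note: this turns the vector [a, b, c] into the chain
   a -> b -> c via DECL_CHAIN; iterating in reverse pushes each element
   onto the front of the chain, so no final reversal is needed.  */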
4588 static tree
4589 vec2chain (vec<tree, va_gc> *v)
4591 tree chain = NULL_TREE, t;
4592 unsigned ix;
4594 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4596 DECL_CHAIN (t) = chain;
4597 chain = t;
4600 return chain;
4604 /* Remove barriers in REGION->EXIT's block. Note that this is only
4605 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4606 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
4607 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
4608 removed. */
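/* Illustrative sketch, not taken from this file:

     #pragma omp parallel
     {
       #pragma omp for
       ...                // implicit barrier at the end of the for
     }                    // implicit join barrier here as well

   The inner barrier is redundant with the join barrier, so the
   workshare's GIMPLE_OMP_RETURN can be marked nowait, unless queued
   tasks might still need addressable locals (checked below).  */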
4610 static void
4611 remove_exit_barrier (struct omp_region *region)
4613 gimple_stmt_iterator gsi;
4614 basic_block exit_bb;
4615 edge_iterator ei;
4616 edge e;
4617 gimple stmt;
4618 int any_addressable_vars = -1;
4620 exit_bb = region->exit;
4622 /* If the parallel region doesn't return, we don't have a REGION->EXIT
4623 block at all. */
4624 if (! exit_bb)
4625 return;
4627 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4628 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4629 statements that can appear in between are extremely limited -- no
4630 memory operations at all. Here, we allow nothing at all, so the
4631 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4632 gsi = gsi_last_bb (exit_bb);
4633 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4634 gsi_prev (&gsi);
4635 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4636 return;
4638 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4640 gsi = gsi_last_bb (e->src);
4641 if (gsi_end_p (gsi))
4642 continue;
4643 stmt = gsi_stmt (gsi);
4644 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4645 && !gimple_omp_return_nowait_p (stmt))
4647 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4648 in many cases. If there could be tasks queued, the barrier
4649 might be needed to let the tasks run before some local
4650 variable of the parallel that the task uses as shared
4651 runs out of scope. The task can be spawned either
4652 from within the current function (this would be easy to check)
4653 or from some function it calls and gets passed an address
4654 of such a variable. */
4655 if (any_addressable_vars < 0)
4657 gimple parallel_stmt = last_stmt (region->entry);
4658 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4659 tree local_decls, block, decl;
4660 unsigned ix;
4662 any_addressable_vars = 0;
4663 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4664 if (TREE_ADDRESSABLE (decl))
4666 any_addressable_vars = 1;
4667 break;
4669 for (block = gimple_block (stmt);
4670 !any_addressable_vars
4671 && block
4672 && TREE_CODE (block) == BLOCK;
4673 block = BLOCK_SUPERCONTEXT (block))
4675 for (local_decls = BLOCK_VARS (block);
4676 local_decls;
4677 local_decls = DECL_CHAIN (local_decls))
4678 if (TREE_ADDRESSABLE (local_decls))
4680 any_addressable_vars = 1;
4681 break;
4683 if (block == gimple_block (parallel_stmt))
4684 break;
4687 if (!any_addressable_vars)
4688 gimple_omp_return_set_nowait (stmt);
4693 static void
4694 remove_exit_barriers (struct omp_region *region)
4696 if (region->type == GIMPLE_OMP_PARALLEL)
4697 remove_exit_barrier (region);
4699 if (region->inner)
4701 region = region->inner;
4702 remove_exit_barriers (region);
4703 while (region->next)
4705 region = region->next;
4706 remove_exit_barriers (region);
4711 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4712 calls. These can't be declared as const functions, but
4713 within one parallel body they are constant, so they can be
4714 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4715 which are declared const. Similarly for the task body, except
4716 that in an untied task omp_get_thread_num () can change at any task
4717 scheduling point. */
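/* Illustrative sketch, not taken from this file: inside one parallel
   body

     n1 = omp_get_num_threads ();   // external call, not const
     n2 = omp_get_num_threads ();

   both calls become __builtin_omp_get_num_threads (), which is
   declared const, so later passes can CSE them into a single call.  */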
4719 static void
4720 optimize_omp_library_calls (gimple entry_stmt)
4722 basic_block bb;
4723 gimple_stmt_iterator gsi;
4724 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4725 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4726 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4727 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4728 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4729 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4730 OMP_CLAUSE_UNTIED) != NULL);
4732 FOR_EACH_BB_FN (bb, cfun)
4733 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4735 gimple call = gsi_stmt (gsi);
4736 tree decl;
4738 if (is_gimple_call (call)
4739 && (decl = gimple_call_fndecl (call))
4740 && DECL_EXTERNAL (decl)
4741 && TREE_PUBLIC (decl)
4742 && DECL_INITIAL (decl) == NULL)
4744 tree built_in;
4746 if (DECL_NAME (decl) == thr_num_id)
4748 /* In #pragma omp task untied omp_get_thread_num () can change
4749 during the execution of the task region. */
4750 if (untied_task)
4751 continue;
4752 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4754 else if (DECL_NAME (decl) == num_thr_id)
4755 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4756 else
4757 continue;
4759 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4760 || gimple_call_num_args (call) != 0)
4761 continue;
4763 if (flag_exceptions && !TREE_NOTHROW (decl))
4764 continue;
4766 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4767 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4768 TREE_TYPE (TREE_TYPE (built_in))))
4769 continue;
4771 gimple_call_set_fndecl (call, built_in);
4776 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4777 regimplified. */
4779 static tree
4780 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4782 tree t = *tp;
4784 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4785 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4786 return t;
4788 if (TREE_CODE (t) == ADDR_EXPR)
4789 recompute_tree_invariant_for_addr_expr (t);
4791 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4792 return NULL_TREE;
4795 /* Prepend TO = FROM assignment before *GSI_P. */
4797 static void
4798 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4800 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4801 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4802 true, GSI_SAME_STMT);
4803 gimple stmt = gimple_build_assign (to, from);
4804 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4805 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4806 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4808 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4809 gimple_regimplify_operands (stmt, &gsi);
4813 /* Expand the OpenMP parallel or task directive starting at REGION. */
4815 static void
4816 expand_omp_taskreg (struct omp_region *region)
4818 basic_block entry_bb, exit_bb, new_bb;
4819 struct function *child_cfun;
4820 tree child_fn, block, t;
4821 gimple_stmt_iterator gsi;
4822 gimple entry_stmt, stmt;
4823 edge e;
4824 vec<tree, va_gc> *ws_args;
4826 entry_stmt = last_stmt (region->entry);
4827 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4828 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4830 entry_bb = region->entry;
4831 exit_bb = region->exit;
4833 bool is_cilk_for
4834 = (flag_cilkplus
4835 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
4836 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
4837 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
4839 if (is_cilk_for)
4840 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
4841 and the inner statement contains the name of the built-in function
4842 and grain. */
4843 ws_args = region->inner->ws_args;
4844 else if (is_combined_parallel (region))
4845 ws_args = region->ws_args;
4846 else
4847 ws_args = NULL;
4849 if (child_cfun->cfg)
4851 /* Due to inlining, it may happen that we have already outlined
4852 the region, in which case all we need to do is make the
4853 sub-graph unreachable and emit the parallel call. */
4854 edge entry_succ_e, exit_succ_e;
4856 entry_succ_e = single_succ_edge (entry_bb);
4858 gsi = gsi_last_bb (entry_bb);
4859 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4860 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4861 gsi_remove (&gsi, true);
4863 new_bb = entry_bb;
4864 if (exit_bb)
4866 exit_succ_e = single_succ_edge (exit_bb);
4867 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4869 remove_edge_and_dominated_blocks (entry_succ_e);
4871 else
4873 unsigned srcidx, dstidx, num;
4875 /* If the parallel region needs data sent from the parent
4876 function, then the very first statement (except possible
4877 tree profile counter updates) of the parallel body
4878 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4879 &.OMP_DATA_O is passed as an argument to the child function,
4880 we need to replace it with the argument as seen by the child
4881 function.
4883 In most cases, this will end up being the identity assignment
4884 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4885 a function call that has been inlined, the original PARM_DECL
4886 .OMP_DATA_I may have been converted into a different local
4887 variable, in which case we need to keep the assignment. */
4888 if (gimple_omp_taskreg_data_arg (entry_stmt))
4890 basic_block entry_succ_bb = single_succ (entry_bb);
4891 tree arg, narg;
4892 gimple parcopy_stmt = NULL;
4894 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4896 gimple stmt;
4898 gcc_assert (!gsi_end_p (gsi));
4899 stmt = gsi_stmt (gsi);
4900 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4901 continue;
4903 if (gimple_num_ops (stmt) == 2)
4905 tree arg = gimple_assign_rhs1 (stmt);
4907 /* We ignore the subcode because we're
4908 effectively doing a STRIP_NOPS. */
4910 if (TREE_CODE (arg) == ADDR_EXPR
4911 && TREE_OPERAND (arg, 0)
4912 == gimple_omp_taskreg_data_arg (entry_stmt))
4914 parcopy_stmt = stmt;
4915 break;
4920 gcc_assert (parcopy_stmt != NULL);
4921 arg = DECL_ARGUMENTS (child_fn);
4923 if (!gimple_in_ssa_p (cfun))
4925 if (gimple_assign_lhs (parcopy_stmt) == arg)
4926 gsi_remove (&gsi, true);
4927 else
4929 /* ?? Is setting the subcode really necessary ?? */
4930 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4931 gimple_assign_set_rhs1 (parcopy_stmt, arg);
4934 else
4936 /* If we are in ssa form, we must load the value from the default
4937 definition of the argument. That should not be defined now,
4938 since the argument is not used uninitialized. */
4939 gcc_assert (ssa_default_def (cfun, arg) == NULL);
4940 narg = make_ssa_name (arg, gimple_build_nop ());
4941 set_ssa_default_def (cfun, arg, narg);
4942 /* ?? Is setting the subcode really necessary ?? */
4943 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
4944 gimple_assign_set_rhs1 (parcopy_stmt, narg);
4945 update_stmt (parcopy_stmt);
4949 /* Declare local variables needed in CHILD_CFUN. */
4950 block = DECL_INITIAL (child_fn);
4951 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
4952 /* The gimplifier could record temporaries in the parallel/task block
4953 rather than in the containing function's local_decls chain,
4954 which would mean cgraph missed finalizing them. Do it now. */
4955 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
4956 if (TREE_CODE (t) == VAR_DECL
4957 && TREE_STATIC (t)
4958 && !DECL_EXTERNAL (t))
4959 varpool_node::finalize_decl (t);
4960 DECL_SAVED_TREE (child_fn) = NULL;
4961 /* We'll create a CFG for child_fn, so no gimple body is needed. */
4962 gimple_set_body (child_fn, NULL);
4963 TREE_USED (block) = 1;
4965 /* Reset DECL_CONTEXT on function arguments. */
4966 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
4967 DECL_CONTEXT (t) = child_fn;
4969 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
4970 so that it can be moved to the child function. */
4971 gsi = gsi_last_bb (entry_bb);
4972 stmt = gsi_stmt (gsi);
4973 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
4974 || gimple_code (stmt) == GIMPLE_OMP_TASK));
4975 gsi_remove (&gsi, true);
4976 e = split_block (entry_bb, stmt);
4977 entry_bb = e->dest;
4978 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4980 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
4981 if (exit_bb)
4983 gsi = gsi_last_bb (exit_bb);
4984 gcc_assert (!gsi_end_p (gsi)
4985 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4986 stmt = gimple_build_return (NULL);
4987 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4988 gsi_remove (&gsi, true);
4991 /* Move the parallel region into CHILD_CFUN. */
4993 if (gimple_in_ssa_p (cfun))
4995 init_tree_ssa (child_cfun);
4996 init_ssa_operands (child_cfun);
4997 child_cfun->gimple_df->in_ssa_p = true;
4998 block = NULL_TREE;
5000 else
5001 block = gimple_block (entry_stmt);
5003 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
5004 if (exit_bb)
5005 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
5006 /* When the OMP expansion process cannot guarantee an up-to-date
5007 loop tree, arrange for the child function to fix up loops. */
5008 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5009 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
5011 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5012 num = vec_safe_length (child_cfun->local_decls);
5013 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
5015 t = (*child_cfun->local_decls)[srcidx];
5016 if (DECL_CONTEXT (t) == cfun->decl)
5017 continue;
5018 if (srcidx != dstidx)
5019 (*child_cfun->local_decls)[dstidx] = t;
5020 dstidx++;
5022 if (dstidx != num)
5023 vec_safe_truncate (child_cfun->local_decls, dstidx);
5025 /* Inform the callgraph about the new function. */
5026 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
5027 cgraph_node::add_new_function (child_fn, true);
5029 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5030 fixed in a following pass. */
5031 push_cfun (child_cfun);
5032 if (optimize)
5033 optimize_omp_library_calls (entry_stmt);
5034 cgraph_edge::rebuild_edges ();
5036 /* Some EH regions might become dead, see PR34608. If
5037 pass_cleanup_cfg isn't the first pass to happen with the
5038 new child, these dead EH edges might cause problems.
5039 Clean them up now. */
5040 if (flag_exceptions)
5042 basic_block bb;
5043 bool changed = false;
5045 FOR_EACH_BB_FN (bb, cfun)
5046 changed |= gimple_purge_dead_eh_edges (bb);
5047 if (changed)
5048 cleanup_tree_cfg ();
5050 if (gimple_in_ssa_p (cfun))
5051 update_ssa (TODO_update_ssa);
5052 pop_cfun ();
5055 /* Emit a library call to launch the children threads. */
5056 if (is_cilk_for)
5057 expand_cilk_for_call (new_bb, entry_stmt, ws_args);
5058 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5059 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
5060 else
5061 expand_task_call (new_bb, entry_stmt);
5062 if (gimple_in_ssa_p (cfun))
5063 update_ssa (TODO_update_ssa_only_virtuals);
5067 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5068 of the combined collapse > 1 loop constructs, generate code like:
5069 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5070 if (cond3 is <)
5071 adj = STEP3 - 1;
5072 else
5073 adj = STEP3 + 1;
5074 count3 = (adj + N32 - N31) / STEP3;
5075 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5076 if (cond2 is <)
5077 adj = STEP2 - 1;
5078 else
5079 adj = STEP2 + 1;
5080 count2 = (adj + N22 - N21) / STEP2;
5081 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5082 if (cond1 is <)
5083 adj = STEP1 - 1;
5084 else
5085 adj = STEP1 + 1;
5086 count1 = (adj + N12 - N11) / STEP1;
5087 count = count1 * count2 * count3;
5088 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5089 count = 0;
5090 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5091 of the combined loop constructs, just initialize COUNTS array
5092 from the _looptemp_ clauses. */
5094 /* NOTE: It *could* be better to moosh all of the BBs together,
5095 creating one larger BB with all the computation and the unexpected
5096 jump at the end. I.e.
5098 bool zero3, zero2, zero1, zero;
5100 zero3 = N32 c3 N31;
5101 count3 = (N32 - N31) /[cl] STEP3;
5102 zero2 = N22 c2 N21;
5103 count2 = (N22 - N21) /[cl] STEP2;
5104 zero1 = N12 c1 N11;
5105 count1 = (N12 - N11) /[cl] STEP1;
5106 zero = zero3 || zero2 || zero1;
5107 count = count1 * count2 * count3;
5108 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5110 After all, we expect the zero=false, and thus we expect to have to
5111 evaluate all of the comparison expressions, so short-circuiting
5112 oughtn't be a win. Since the condition isn't protecting a
5113 denominator, we're not concerned about divide-by-zero, so we can
5114 fully evaluate count even if a numerator turned out to be wrong.
5116 It seems like putting this all together would create much better
5117 scheduling opportunities, and less pressure on the chip's branch
5118 predictor. */
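/* Worked example, illustrative only: for

     for (i = 0; i < 10; i += 3)

   cond1 is <, so adj = STEP1 - 1 = 2 and
   count1 = (2 + 10 - 0) / 3 = 4, i.e. the iterations i = 0, 3, 6, 9.  */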
5120 static void
5121 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5122 basic_block &entry_bb, tree *counts,
5123 basic_block &zero_iter_bb, int &first_zero_iter,
5124 basic_block &l2_dom_bb)
5126 tree t, type = TREE_TYPE (fd->loop.v);
5127 gimple stmt;
5128 edge e, ne;
5129 int i;
5131 /* Collapsed loops need work for expansion into SSA form. */
5132 gcc_assert (!gimple_in_ssa_p (cfun));
5134 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5135 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5137 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5138 isn't supposed to be handled, as the inner loop doesn't
5139 use it. */
5140 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5141 OMP_CLAUSE__LOOPTEMP_);
5142 gcc_assert (innerc);
5143 for (i = 0; i < fd->collapse; i++)
5145 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5146 OMP_CLAUSE__LOOPTEMP_);
5147 gcc_assert (innerc);
5148 if (i)
5149 counts[i] = OMP_CLAUSE_DECL (innerc);
5150 else
5151 counts[0] = NULL_TREE;
5153 return;
5156 for (i = 0; i < fd->collapse; i++)
5158 tree itype = TREE_TYPE (fd->loops[i].v);
5160 if (SSA_VAR_P (fd->loop.n2)
5161 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5162 fold_convert (itype, fd->loops[i].n1),
5163 fold_convert (itype, fd->loops[i].n2)))
5164 == NULL_TREE || !integer_onep (t)))
5166 tree n1, n2;
5167 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5168 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5169 true, GSI_SAME_STMT);
5170 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5171 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5172 true, GSI_SAME_STMT);
5173 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5174 NULL_TREE, NULL_TREE);
5175 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5176 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5177 expand_omp_regimplify_p, NULL, NULL)
5178 || walk_tree (gimple_cond_rhs_ptr (stmt),
5179 expand_omp_regimplify_p, NULL, NULL))
5181 *gsi = gsi_for_stmt (stmt);
5182 gimple_regimplify_operands (stmt, gsi);
5184 e = split_block (entry_bb, stmt);
5185 if (zero_iter_bb == NULL)
5187 first_zero_iter = i;
5188 zero_iter_bb = create_empty_bb (entry_bb);
5189 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5190 *gsi = gsi_after_labels (zero_iter_bb);
5191 stmt = gimple_build_assign (fd->loop.n2,
5192 build_zero_cst (type));
5193 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5194 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5195 entry_bb);
5197 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5198 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5199 e->flags = EDGE_TRUE_VALUE;
5200 e->probability = REG_BR_PROB_BASE - ne->probability;
5201 if (l2_dom_bb == NULL)
5202 l2_dom_bb = entry_bb;
5203 entry_bb = e->dest;
5204 *gsi = gsi_last_bb (entry_bb);
5207 if (POINTER_TYPE_P (itype))
5208 itype = signed_type_for (itype);
5209 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5210 ? -1 : 1));
5211 t = fold_build2 (PLUS_EXPR, itype,
5212 fold_convert (itype, fd->loops[i].step), t);
5213 t = fold_build2 (PLUS_EXPR, itype, t,
5214 fold_convert (itype, fd->loops[i].n2));
5215 t = fold_build2 (MINUS_EXPR, itype, t,
5216 fold_convert (itype, fd->loops[i].n1));
5217 /* ?? We could probably use CEIL_DIV_EXPR instead of
5218 TRUNC_DIV_EXPR and adjust by hand, unless we can't
5219 generate the same code in the end because generically we
5220 don't know that the values involved must be negative for
5221 GT. ?? */
5222 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5223 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5224 fold_build1 (NEGATE_EXPR, itype, t),
5225 fold_build1 (NEGATE_EXPR, itype,
5226 fold_convert (itype,
5227 fd->loops[i].step)));
5228 else
5229 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5230 fold_convert (itype, fd->loops[i].step));
5231 t = fold_convert (type, t);
5232 if (TREE_CODE (t) == INTEGER_CST)
5233 counts[i] = t;
5234 else
5236 counts[i] = create_tmp_reg (type, ".count");
5237 expand_omp_build_assign (gsi, counts[i], t);
5239 if (SSA_VAR_P (fd->loop.n2))
5241 if (i == 0)
5242 t = counts[0];
5243 else
5244 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5245 expand_omp_build_assign (gsi, fd->loop.n2, t);
5251 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5252 T = V;
5253 V3 = N31 + (T % count3) * STEP3;
5254 T = T / count3;
5255 V2 = N21 + (T % count2) * STEP2;
5256 T = T / count2;
5257 V1 = N11 + T * STEP1;
5258 if this loop doesn't have an inner loop construct combined with it.
5259 If it does have an inner loop construct combined with it and the
5260 iteration count isn't known constant, store values from counts array
5261 into its _looptemp_ temporaries instead. */
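/* Worked example, illustrative only: with collapse(3), count2 == 3,
   count3 == 4 and logical iteration T == 7:

     V3 = N31 + (7 % 4) * STEP3;   T = 7 / 4;   // T == 1
     V2 = N21 + (1 % 3) * STEP2;   T = 1 / 3;   // T == 0
     V1 = N11 + 0 * STEP1;

   recovering position (0, 1, 3) in the three-deep loop nest.  */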
5263 static void
5264 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5265 tree *counts, gimple inner_stmt, tree startvar)
5267 int i;
5268 if (gimple_omp_for_combined_p (fd->for_stmt))
5270 /* If fd->loop.n2 is constant, then no propagation of the counts
5271 is needed, they are constant. */
5272 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5273 return;
5275 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5276 ? gimple_omp_parallel_clauses (inner_stmt)
5277 : gimple_omp_for_clauses (inner_stmt);
5278 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5279 isn't supposed to be handled, as the inner loop doesn't
5280 use it. */
5281 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5282 gcc_assert (innerc);
5283 for (i = 0; i < fd->collapse; i++)
5285 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5286 OMP_CLAUSE__LOOPTEMP_);
5287 gcc_assert (innerc);
5288 if (i)
5290 tree tem = OMP_CLAUSE_DECL (innerc);
5291 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5292 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5293 false, GSI_CONTINUE_LINKING);
5294 gimple stmt = gimple_build_assign (tem, t);
5295 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5298 return;
5301 tree type = TREE_TYPE (fd->loop.v);
5302 tree tem = create_tmp_reg (type, ".tem");
5303 gimple stmt = gimple_build_assign (tem, startvar);
5304 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5306 for (i = fd->collapse - 1; i >= 0; i--)
5308 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5309 itype = vtype;
5310 if (POINTER_TYPE_P (vtype))
5311 itype = signed_type_for (vtype);
5312 if (i != 0)
5313 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5314 else
5315 t = tem;
5316 t = fold_convert (itype, t);
5317 t = fold_build2 (MULT_EXPR, itype, t,
5318 fold_convert (itype, fd->loops[i].step));
5319 if (POINTER_TYPE_P (vtype))
5320 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5321 else
5322 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5323 t = force_gimple_operand_gsi (gsi, t,
5324 DECL_P (fd->loops[i].v)
5325 && TREE_ADDRESSABLE (fd->loops[i].v),
5326 NULL_TREE, false,
5327 GSI_CONTINUE_LINKING);
5328 stmt = gimple_build_assign (fd->loops[i].v, t);
5329 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5330 if (i != 0)
5332 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5333 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5334 false, GSI_CONTINUE_LINKING);
5335 stmt = gimple_build_assign (tem, t);
5336 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5342 /* Helper function for expand_omp_for_*. Generate code like:
5343 L10:
5344 V3 += STEP3;
5345 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5346 L11:
5347 V3 = N31;
5348 V2 += STEP2;
5349 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5350 L12:
5351 V2 = N21;
5352 V1 += STEP1;
5353 goto BODY_BB; */
5355 static basic_block
5356 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5357 basic_block body_bb)
5359 basic_block last_bb, bb, collapse_bb = NULL;
5360 int i;
5361 gimple_stmt_iterator gsi;
5362 edge e;
5363 tree t;
5364 gimple stmt;
5366 last_bb = cont_bb;
5367 for (i = fd->collapse - 1; i >= 0; i--)
5369 tree vtype = TREE_TYPE (fd->loops[i].v);
5371 bb = create_empty_bb (last_bb);
5372 add_bb_to_loop (bb, last_bb->loop_father);
5373 gsi = gsi_start_bb (bb);
5375 if (i < fd->collapse - 1)
5377 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5378 e->probability = REG_BR_PROB_BASE / 8;
5380 t = fd->loops[i + 1].n1;
5381 t = force_gimple_operand_gsi (&gsi, t,
5382 DECL_P (fd->loops[i + 1].v)
5383 && TREE_ADDRESSABLE (fd->loops[i
5384 + 1].v),
5385 NULL_TREE, false,
5386 GSI_CONTINUE_LINKING);
5387 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5388 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5390 else
5391 collapse_bb = bb;
5393 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5395 if (POINTER_TYPE_P (vtype))
5396 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5397 else
5398 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5399 t = force_gimple_operand_gsi (&gsi, t,
5400 DECL_P (fd->loops[i].v)
5401 && TREE_ADDRESSABLE (fd->loops[i].v),
5402 NULL_TREE, false, GSI_CONTINUE_LINKING);
5403 stmt = gimple_build_assign (fd->loops[i].v, t);
5404 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5406 if (i > 0)
5408 t = fd->loops[i].n2;
5409 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5410 false, GSI_CONTINUE_LINKING);
5411 tree v = fd->loops[i].v;
5412 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5413 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5414 false, GSI_CONTINUE_LINKING);
5415 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5416 stmt = gimple_build_cond_empty (t);
5417 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5418 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5419 e->probability = REG_BR_PROB_BASE * 7 / 8;
5421 else
5422 make_edge (bb, body_bb, EDGE_FALLTHRU);
5423 last_bb = bb;
5426 return collapse_bb;
5430 /* A subroutine of expand_omp_for. Generate code for a parallel
5431 loop with any schedule. Given parameters:
5433 for (V = N1; V cond N2; V += STEP) BODY;
5435 where COND is "<" or ">", we generate pseudocode
5437 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5438 if (more) goto L0; else goto L3;
5439 L0:
5440 V = istart0;
5441 iend = iend0;
5442 L1:
5443 BODY;
5444 V += STEP;
5445 if (V cond iend) goto L1; else goto L2;
5446 L2:
5447 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5448 L3:
5450 If this is a combined omp parallel loop, instead of the call to
5451 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5452 If this is gimple_omp_for_combined_p loop, then instead of assigning
5453 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5454 inner GIMPLE_OMP_FOR and V += STEP; and
5455 if (V cond iend) goto L1; else goto L2; are removed.
5457 For collapsed loops, given parameters:
5458 collapse(3)
5459 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5460 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5461 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5462 BODY;
5464 we generate pseudocode
5466 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5467 if (cond3 is <)
5468 adj = STEP3 - 1;
5469 else
5470 adj = STEP3 + 1;
5471 count3 = (adj + N32 - N31) / STEP3;
5472 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5473 if (cond2 is <)
5474 adj = STEP2 - 1;
5475 else
5476 adj = STEP2 + 1;
5477 count2 = (adj + N22 - N21) / STEP2;
5478 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5479 if (cond1 is <)
5480 adj = STEP1 - 1;
5481 else
5482 adj = STEP1 + 1;
5483 count1 = (adj + N12 - N11) / STEP1;
5484 count = count1 * count2 * count3;
5485 goto Z1;
5486 Z0:
5487 count = 0;
5488 Z1:
5489 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5490 if (more) goto L0; else goto L3;
5491 L0:
5492 V = istart0;
5493 T = V;
5494 V3 = N31 + (T % count3) * STEP3;
5495 T = T / count3;
5496 V2 = N21 + (T % count2) * STEP2;
5497 T = T / count2;
5498 V1 = N11 + T * STEP1;
5499 iend = iend0;
5500 L1:
5501 BODY;
5502 V += 1;
5503 if (V < iend) goto L10; else goto L2;
5504 L10:
5505 V3 += STEP3;
5506 if (V3 cond3 N32) goto L1; else goto L11;
5507 L11:
5508 V3 = N31;
5509 V2 += STEP2;
5510 if (V2 cond2 N22) goto L1; else goto L12;
5511 L12:
5512 V2 = N21;
5513 V1 += STEP1;
5514 goto L1;
5515 L2:
5516 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5517 L3:
5518 */
5521 static void
5522 expand_omp_for_generic (struct omp_region *region,
5523 struct omp_for_data *fd,
5524 enum built_in_function start_fn,
5525 enum built_in_function next_fn,
5526 gimple inner_stmt)
5528 tree type, istart0, iend0, iend;
5529 tree t, vmain, vback, bias = NULL_TREE;
5530 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5531 basic_block l2_bb = NULL, l3_bb = NULL;
5532 gimple_stmt_iterator gsi;
5533 gimple stmt;
5534 bool in_combined_parallel = is_combined_parallel (region);
5535 bool broken_loop = region->cont == NULL;
5536 edge e, ne;
5537 tree *counts = NULL;
5538 int i;
5540 gcc_assert (!broken_loop || !in_combined_parallel);
5541 gcc_assert (fd->iter_type == long_integer_type_node
5542 || !in_combined_parallel);
5544 type = TREE_TYPE (fd->loop.v);
5545 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5546 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5547 TREE_ADDRESSABLE (istart0) = 1;
5548 TREE_ADDRESSABLE (iend0) = 1;
5550 /* See if we need to bias by LLONG_MIN. */
5551 if (fd->iter_type == long_long_unsigned_type_node
5552 && TREE_CODE (type) == INTEGER_TYPE
5553 && !TYPE_UNSIGNED (type))
5555 tree n1, n2;
5557 if (fd->loop.cond_code == LT_EXPR)
5559 n1 = fd->loop.n1;
5560 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5562 else
5564 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5565 n2 = fd->loop.n1;
5567 if (TREE_CODE (n1) != INTEGER_CST
5568 || TREE_CODE (n2) != INTEGER_CST
5569 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5570 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5573 entry_bb = region->entry;
5574 cont_bb = region->cont;
5575 collapse_bb = NULL;
5576 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5577 gcc_assert (broken_loop
5578 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5579 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5580 l1_bb = single_succ (l0_bb);
5581 if (!broken_loop)
5583 l2_bb = create_empty_bb (cont_bb);
5584 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5585 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5587 else
5588 l2_bb = NULL;
5589 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5590 exit_bb = region->exit;
5592 gsi = gsi_last_bb (entry_bb);
5594 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5595 if (fd->collapse > 1)
5597 int first_zero_iter = -1;
5598 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5600 counts = XALLOCAVEC (tree, fd->collapse);
5601 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5602 zero_iter_bb, first_zero_iter,
5603 l2_dom_bb);
5605 if (zero_iter_bb)
5607 /* Some counts[i] vars might be uninitialized if
5608 some loop has zero iterations. But the body shouldn't
5609 be executed in that case, so just avoid uninit warnings. */
5610 for (i = first_zero_iter; i < fd->collapse; i++)
5611 if (SSA_VAR_P (counts[i]))
5612 TREE_NO_WARNING (counts[i]) = 1;
5613 gsi_prev (&gsi);
5614 e = split_block (entry_bb, gsi_stmt (gsi));
5615 entry_bb = e->dest;
5616 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5617 gsi = gsi_last_bb (entry_bb);
5618 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5619 get_immediate_dominator (CDI_DOMINATORS,
5620 zero_iter_bb));
5623 if (in_combined_parallel)
5625 /* In a combined parallel loop, emit a call to
5626 GOMP_loop_foo_next. */
5627 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5628 build_fold_addr_expr (istart0),
5629 build_fold_addr_expr (iend0));
5631 else
5633 tree t0, t1, t2, t3, t4;
5634 /* If this is not a combined parallel loop, emit a call to
5635 GOMP_loop_foo_start in ENTRY_BB. */
5636 t4 = build_fold_addr_expr (iend0);
5637 t3 = build_fold_addr_expr (istart0);
5638 t2 = fold_convert (fd->iter_type, fd->loop.step);
5639 t1 = fd->loop.n2;
5640 t0 = fd->loop.n1;
5641 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5643 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5644 OMP_CLAUSE__LOOPTEMP_);
5645 gcc_assert (innerc);
5646 t0 = OMP_CLAUSE_DECL (innerc);
5647 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5648 OMP_CLAUSE__LOOPTEMP_);
5649 gcc_assert (innerc);
5650 t1 = OMP_CLAUSE_DECL (innerc);
5652 if (POINTER_TYPE_P (TREE_TYPE (t0))
5653 && TYPE_PRECISION (TREE_TYPE (t0))
5654 != TYPE_PRECISION (fd->iter_type))
5656 /* Avoid casting pointers to integer of a different size. */
5657 tree itype = signed_type_for (type);
5658 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5659 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5661 else
5663 t1 = fold_convert (fd->iter_type, t1);
5664 t0 = fold_convert (fd->iter_type, t0);
5666 if (bias)
5668 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5669 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5671 if (fd->iter_type == long_integer_type_node)
5673 if (fd->chunk_size)
5675 t = fold_convert (fd->iter_type, fd->chunk_size);
5676 t = build_call_expr (builtin_decl_explicit (start_fn),
5677 6, t0, t1, t2, t, t3, t4);
5679 else
5680 t = build_call_expr (builtin_decl_explicit (start_fn),
5681 5, t0, t1, t2, t3, t4);
5683 else
5685 tree t5;
5686 tree c_bool_type;
5687 tree bfn_decl;
5689 /* The GOMP_loop_ull_*start functions have additional boolean
5690 argument, true for < loops and false for > loops.
5691 In Fortran, the C bool type can be different from
5692 boolean_type_node. */
5693 bfn_decl = builtin_decl_explicit (start_fn);
5694 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5695 t5 = build_int_cst (c_bool_type,
5696 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5697 if (fd->chunk_size)
5699 tree bfn_decl = builtin_decl_explicit (start_fn);
5700 t = fold_convert (fd->iter_type, fd->chunk_size);
5701 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5703 else
5704 t = build_call_expr (builtin_decl_explicit (start_fn),
5705 6, t5, t0, t1, t2, t3, t4);
5708 if (TREE_TYPE (t) != boolean_type_node)
5709 t = fold_build2 (NE_EXPR, boolean_type_node,
5710 t, build_int_cst (TREE_TYPE (t), 0));
5711 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5712 true, GSI_SAME_STMT);
5713 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5715 /* Remove the GIMPLE_OMP_FOR statement. */
5716 gsi_remove (&gsi, true);
5718 /* Iteration setup for sequential loop goes in L0_BB. */
5719 tree startvar = fd->loop.v;
5720 tree endvar = NULL_TREE;
5722 if (gimple_omp_for_combined_p (fd->for_stmt))
5724 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5725 && gimple_omp_for_kind (inner_stmt)
5726 == GF_OMP_FOR_KIND_SIMD);
5727 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5728 OMP_CLAUSE__LOOPTEMP_);
5729 gcc_assert (innerc);
5730 startvar = OMP_CLAUSE_DECL (innerc);
5731 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5732 OMP_CLAUSE__LOOPTEMP_);
5733 gcc_assert (innerc);
5734 endvar = OMP_CLAUSE_DECL (innerc);
5737 gsi = gsi_start_bb (l0_bb);
5738 t = istart0;
5739 if (bias)
5740 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5741 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5742 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5743 t = fold_convert (TREE_TYPE (startvar), t);
5744 t = force_gimple_operand_gsi (&gsi, t,
5745 DECL_P (startvar)
5746 && TREE_ADDRESSABLE (startvar),
5747 NULL_TREE, false, GSI_CONTINUE_LINKING);
5748 stmt = gimple_build_assign (startvar, t);
5749 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5751 t = iend0;
5752 if (bias)
5753 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5754 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5755 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5756 t = fold_convert (TREE_TYPE (startvar), t);
5757 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5758 false, GSI_CONTINUE_LINKING);
5759 if (endvar)
5761 stmt = gimple_build_assign (endvar, iend);
5762 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5763 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5764 stmt = gimple_build_assign (fd->loop.v, iend);
5765 else
5766 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, iend,
5767 NULL_TREE);
5768 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5770 if (fd->collapse > 1)
5771 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5773 if (!broken_loop)
5775 /* Code to control the increment and predicate for the sequential
5776 loop goes in the CONT_BB. */
5777 gsi = gsi_last_bb (cont_bb);
5778 stmt = gsi_stmt (gsi);
5779 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5780 vmain = gimple_omp_continue_control_use (stmt);
5781 vback = gimple_omp_continue_control_def (stmt);
5783 if (!gimple_omp_for_combined_p (fd->for_stmt))
5785 if (POINTER_TYPE_P (type))
5786 t = fold_build_pointer_plus (vmain, fd->loop.step);
5787 else
5788 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5789 t = force_gimple_operand_gsi (&gsi, t,
5790 DECL_P (vback)
5791 && TREE_ADDRESSABLE (vback),
5792 NULL_TREE, true, GSI_SAME_STMT);
5793 stmt = gimple_build_assign (vback, t);
5794 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5796 t = build2 (fd->loop.cond_code, boolean_type_node,
5797 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5798 iend);
5799 stmt = gimple_build_cond_empty (t);
5800 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5803 /* Remove GIMPLE_OMP_CONTINUE. */
5804 gsi_remove (&gsi, true);
5806 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5807 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5809 /* Emit code to get the next parallel iteration in L2_BB. */
5810 gsi = gsi_start_bb (l2_bb);
5812 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5813 build_fold_addr_expr (istart0),
5814 build_fold_addr_expr (iend0));
5815 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5816 false, GSI_CONTINUE_LINKING);
5817 if (TREE_TYPE (t) != boolean_type_node)
5818 t = fold_build2 (NE_EXPR, boolean_type_node,
5819 t, build_int_cst (TREE_TYPE (t), 0));
5820 stmt = gimple_build_cond_empty (t);
5821 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5824 /* Add the loop cleanup function. */
5825 gsi = gsi_last_bb (exit_bb);
5826 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5827 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5828 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5829 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5830 else
5831 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5832 stmt = gimple_build_call (t, 0);
5833 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5834 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5835 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5836 gsi_remove (&gsi, true);
5838 /* Connect the new blocks. */
5839 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5840 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5842 if (!broken_loop)
5844 gimple_seq phis;
5846 e = find_edge (cont_bb, l3_bb);
5847 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5849 phis = phi_nodes (l3_bb);
5850 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5852 gimple phi = gsi_stmt (gsi);
5853 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5854 PHI_ARG_DEF_FROM_EDGE (phi, e));
5856 remove_edge (e);
5858 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5859 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5860 e = find_edge (cont_bb, l1_bb);
5861 if (gimple_omp_for_combined_p (fd->for_stmt))
5863 remove_edge (e);
5864 e = NULL;
5866 else if (fd->collapse > 1)
5868 remove_edge (e);
5869 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5871 else
5872 e->flags = EDGE_TRUE_VALUE;
5873 if (e)
5875 e->probability = REG_BR_PROB_BASE * 7 / 8;
5876 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5878 else
5880 e = find_edge (cont_bb, l2_bb);
5881 e->flags = EDGE_FALLTHRU;
5883 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5885 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5886 recompute_dominator (CDI_DOMINATORS, l2_bb));
5887 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5888 recompute_dominator (CDI_DOMINATORS, l3_bb));
5889 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5890 recompute_dominator (CDI_DOMINATORS, l0_bb));
5891 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5892 recompute_dominator (CDI_DOMINATORS, l1_bb));
5894 struct loop *outer_loop = alloc_loop ();
5895 outer_loop->header = l0_bb;
5896 outer_loop->latch = l2_bb;
5897 add_loop (outer_loop, l0_bb->loop_father);
5899 if (!gimple_omp_for_combined_p (fd->for_stmt))
5901 struct loop *loop = alloc_loop ();
5902 loop->header = l1_bb;
5903 /* The loop may have multiple latches. */
5904 add_loop (loop, outer_loop);
5910 /* A subroutine of expand_omp_for. Generate code for a parallel
5911 loop with static schedule and no specified chunk size. Given
5912 parameters:
5914 for (V = N1; V cond N2; V += STEP) BODY;
5916 where COND is "<" or ">", we generate pseudocode
5918 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5919 if (cond is <)
5920 adj = STEP - 1;
5921 else
5922 adj = STEP + 1;
5923 if ((__typeof (V)) -1 > 0 && cond is >)
5924 n = -(adj + N2 - N1) / -STEP;
5925 else
5926 n = (adj + N2 - N1) / STEP;
5927 q = n / nthreads;
5928 tt = n % nthreads;
5929 if (threadid < tt) goto L3; else goto L4;
5930 L3:
5931 tt = 0;
5932 q = q + 1;
5933 L4:
5934 s0 = q * threadid + tt;
5935 e0 = s0 + q;
5936 V = s0 * STEP + N1;
5937 if (s0 >= e0) goto L2; else goto L0;
5938 L0:
5939 e = e0 * STEP + N1;
5940 L1:
5941 BODY;
5942 V += STEP;
5943 if (V cond e) goto L1;
5944 L2:
5945 */
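/* Illustrative sketch (not part of GCC): the partitioning arithmetic
   from the pseudocode above, as a plain C helper.  The names and the
   use of int are assumptions for exposition only.  Each thread gets
   either q or q+1 logical iterations; the first tt threads take the
   larger share, so all n iterations are covered exactly once.  For
   n = 10 and nthreads = 4 this yields q = 2, tt = 2 and the ranges
   [0,3) [3,6) [6,8) [8,10).  */

static void
static_nochunk_range_sketch (int n, int nthreads, int threadid,
                             int *s0, int *e0)
{
  int q = n / nthreads;   /* Base share per thread.  */
  int tt = n % nthreads;  /* Leftover iterations.  */
  if (threadid < tt)
    {
      tt = 0;
      q = q + 1;          /* This thread takes one extra iteration.  */
    }
  *s0 = q * threadid + tt;  /* First logical iteration.  */
  *e0 = *s0 + q;            /* One past the last iteration.  */
}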
5947 static void
5948 expand_omp_for_static_nochunk (struct omp_region *region,
5949 struct omp_for_data *fd,
5950 gimple inner_stmt)
5952 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
5953 tree type, itype, vmain, vback;
5954 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
5955 basic_block body_bb, cont_bb, collapse_bb = NULL;
5956 basic_block fin_bb;
5957 gimple_stmt_iterator gsi;
5958 gimple stmt;
5959 edge ep;
5960 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
5961 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
5962 bool broken_loop = region->cont == NULL;
5963 tree *counts = NULL;
5964 tree n1, n2, step;
5966 itype = type = TREE_TYPE (fd->loop.v);
5967 if (POINTER_TYPE_P (type))
5968 itype = signed_type_for (type);
5970 entry_bb = region->entry;
5971 cont_bb = region->cont;
5972 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5973 fin_bb = BRANCH_EDGE (entry_bb)->dest;
5974 gcc_assert (broken_loop
5975 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
5976 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5977 body_bb = single_succ (seq_start_bb);
5978 if (!broken_loop)
5980 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5981 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5983 exit_bb = region->exit;
5985 /* Iteration space partitioning goes in ENTRY_BB. */
5986 gsi = gsi_last_bb (entry_bb);
5987 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5989 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
5991 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
5992 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
5995 if (fd->collapse > 1)
5997 int first_zero_iter = -1;
5998 basic_block l2_dom_bb = NULL;
6000 counts = XALLOCAVEC (tree, fd->collapse);
6001 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6002 fin_bb, first_zero_iter,
6003 l2_dom_bb);
6004 t = NULL_TREE;
6006 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6007 t = integer_one_node;
6008 else
6009 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6010 fold_convert (type, fd->loop.n1),
6011 fold_convert (type, fd->loop.n2));
6012 if (fd->collapse == 1
6013 && TYPE_UNSIGNED (type)
6014 && (t == NULL_TREE || !integer_onep (t)))
6016 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6017 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6018 true, GSI_SAME_STMT);
6019 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6020 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6021 true, GSI_SAME_STMT);
6022 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6023 NULL_TREE, NULL_TREE);
6024 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6025 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6026 expand_omp_regimplify_p, NULL, NULL)
6027 || walk_tree (gimple_cond_rhs_ptr (stmt),
6028 expand_omp_regimplify_p, NULL, NULL))
6030 gsi = gsi_for_stmt (stmt);
6031 gimple_regimplify_operands (stmt, &gsi);
6033 ep = split_block (entry_bb, stmt);
6034 ep->flags = EDGE_TRUE_VALUE;
6035 entry_bb = ep->dest;
6036 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6037 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
6038 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
6039 if (gimple_in_ssa_p (cfun))
6041 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6042 for (gsi = gsi_start_phis (fin_bb);
6043 !gsi_end_p (gsi); gsi_next (&gsi))
6045 gimple phi = gsi_stmt (gsi);
6046 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6047 ep, UNKNOWN_LOCATION);
6050 gsi = gsi_last_bb (entry_bb);
6053 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6054 t = fold_convert (itype, t);
6055 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6056 true, GSI_SAME_STMT);
6058 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6059 t = fold_convert (itype, t);
6060 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6061 true, GSI_SAME_STMT);
6063 n1 = fd->loop.n1;
6064 n2 = fd->loop.n2;
6065 step = fd->loop.step;
6066 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6068 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6069 OMP_CLAUSE__LOOPTEMP_);
6070 gcc_assert (innerc);
6071 n1 = OMP_CLAUSE_DECL (innerc);
6072 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6073 OMP_CLAUSE__LOOPTEMP_);
6074 gcc_assert (innerc);
6075 n2 = OMP_CLAUSE_DECL (innerc);
6077 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6078 true, NULL_TREE, true, GSI_SAME_STMT);
6079 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6080 true, NULL_TREE, true, GSI_SAME_STMT);
6081 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6082 true, NULL_TREE, true, GSI_SAME_STMT);
6084 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6085 t = fold_build2 (PLUS_EXPR, itype, step, t);
6086 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6087 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6088 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6089 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6090 fold_build1 (NEGATE_EXPR, itype, t),
6091 fold_build1 (NEGATE_EXPR, itype, step));
6092 else
6093 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6094 t = fold_convert (itype, t);
6095 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6097 q = create_tmp_reg (itype, "q");
6098 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
6099 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6100 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
6102 tt = create_tmp_reg (itype, "tt");
6103 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
6104 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6105 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
6107 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
6108 stmt = gimple_build_cond_empty (t);
6109 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6111 second_bb = split_block (entry_bb, stmt)->dest;
6112 gsi = gsi_last_bb (second_bb);
6113 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6115 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
6116 GSI_SAME_STMT);
6117 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
6118 build_int_cst (itype, 1));
6119 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6121 third_bb = split_block (second_bb, stmt)->dest;
6122 gsi = gsi_last_bb (third_bb);
6123 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6125 t = build2 (MULT_EXPR, itype, q, threadid);
6126 t = build2 (PLUS_EXPR, itype, t, tt);
6127 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6129 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6130 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6132 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6133 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6135 /* Remove the GIMPLE_OMP_FOR statement. */
6136 gsi_remove (&gsi, true);
6138 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6139 gsi = gsi_start_bb (seq_start_bb);
6141 tree startvar = fd->loop.v;
6142 tree endvar = NULL_TREE;
6144 if (gimple_omp_for_combined_p (fd->for_stmt))
6146 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6147 ? gimple_omp_parallel_clauses (inner_stmt)
6148 : gimple_omp_for_clauses (inner_stmt);
6149 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6150 gcc_assert (innerc);
6151 startvar = OMP_CLAUSE_DECL (innerc);
6152 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6153 OMP_CLAUSE__LOOPTEMP_);
6154 gcc_assert (innerc);
6155 endvar = OMP_CLAUSE_DECL (innerc);
6157 t = fold_convert (itype, s0);
6158 t = fold_build2 (MULT_EXPR, itype, t, step);
6159 if (POINTER_TYPE_P (type))
6160 t = fold_build_pointer_plus (n1, t);
6161 else
6162 t = fold_build2 (PLUS_EXPR, type, t, n1);
6163 t = fold_convert (TREE_TYPE (startvar), t);
6164 t = force_gimple_operand_gsi (&gsi, t,
6165 DECL_P (startvar)
6166 && TREE_ADDRESSABLE (startvar),
6167 NULL_TREE, false, GSI_CONTINUE_LINKING);
6168 stmt = gimple_build_assign (startvar, t);
6169 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6171 t = fold_convert (itype, e0);
6172 t = fold_build2 (MULT_EXPR, itype, t, step);
6173 if (POINTER_TYPE_P (type))
6174 t = fold_build_pointer_plus (n1, t);
6175 else
6176 t = fold_build2 (PLUS_EXPR, type, t, n1);
6177 t = fold_convert (TREE_TYPE (startvar), t);
6178 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6179 false, GSI_CONTINUE_LINKING);
6180 if (endvar)
6182 stmt = gimple_build_assign (endvar, e);
6183 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6184 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6185 stmt = gimple_build_assign (fd->loop.v, e);
6186 else
6187 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6188 NULL_TREE);
6189 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6191 if (fd->collapse > 1)
6192 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6194 if (!broken_loop)
6196 /* The code controlling the sequential loop replaces the
6197 GIMPLE_OMP_CONTINUE. */
6198 gsi = gsi_last_bb (cont_bb);
6199 stmt = gsi_stmt (gsi);
6200 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6201 vmain = gimple_omp_continue_control_use (stmt);
6202 vback = gimple_omp_continue_control_def (stmt);
6204 if (!gimple_omp_for_combined_p (fd->for_stmt))
6206 if (POINTER_TYPE_P (type))
6207 t = fold_build_pointer_plus (vmain, step);
6208 else
6209 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6210 t = force_gimple_operand_gsi (&gsi, t,
6211 DECL_P (vback)
6212 && TREE_ADDRESSABLE (vback),
6213 NULL_TREE, true, GSI_SAME_STMT);
6214 stmt = gimple_build_assign (vback, t);
6215 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6217 t = build2 (fd->loop.cond_code, boolean_type_node,
6218 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6219 ? t : vback, e);
6220 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6223 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6224 gsi_remove (&gsi, true);
6226 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6227 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6230 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6231 gsi = gsi_last_bb (exit_bb);
6232 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6234 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6235 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6237 gsi_remove (&gsi, true);
6239 /* Connect all the blocks. */
6240 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6241 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6242 ep = find_edge (entry_bb, second_bb);
6243 ep->flags = EDGE_TRUE_VALUE;
6244 ep->probability = REG_BR_PROB_BASE / 4;
6245 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6246 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6248 if (!broken_loop)
6250 ep = find_edge (cont_bb, body_bb);
6251 if (gimple_omp_for_combined_p (fd->for_stmt))
6253 remove_edge (ep);
6254 ep = NULL;
6256 else if (fd->collapse > 1)
6258 remove_edge (ep);
6259 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6261 else
6262 ep->flags = EDGE_TRUE_VALUE;
6263 find_edge (cont_bb, fin_bb)->flags
6264 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6267 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6268 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6269 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6271 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6272 recompute_dominator (CDI_DOMINATORS, body_bb));
6273 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6274 recompute_dominator (CDI_DOMINATORS, fin_bb));
6276 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6278 struct loop *loop = alloc_loop ();
6279 loop->header = body_bb;
6280 if (collapse_bb == NULL)
6281 loop->latch = cont_bb;
6282 add_loop (loop, body_bb->loop_father);
6287 /* A subroutine of expand_omp_for. Generate code for a parallel
6288 loop with static schedule and a specified chunk size. Given
6289 parameters:
6291 for (V = N1; V cond N2; V += STEP) BODY;
6293 where COND is "<" or ">", we generate pseudocode
6295 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6296 if (cond is <)
6297 adj = STEP - 1;
6298 else
6299 adj = STEP + 1;
6300 if ((__typeof (V)) -1 > 0 && cond is >)
6301 n = -(adj + N2 - N1) / -STEP;
6302 else
6303 n = (adj + N2 - N1) / STEP;
6304 trip = 0;
6305 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6306 here so that V is defined
6307 if the loop is not entered
6308 L0:
6309 s0 = (trip * nthreads + threadid) * CHUNK;
6310 e0 = min(s0 + CHUNK, n);
6311 if (s0 < n) goto L1; else goto L4;
6312 L1:
6313 V = s0 * STEP + N1;
6314 e = e0 * STEP + N1;
6315 L2:
6316 BODY;
6317 V += STEP;
6318 if (V cond e) goto L2; else goto L3;
6319 L3:
6320 trip += 1;
6321 goto L0;
6322 L4:
6323 */
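/* Illustrative sketch (not part of GCC): the chunk selection from
   the pseudocode above, as a plain C helper.  Names and types are
   assumptions for exposition.  On each trip a thread claims the
   chunk starting at (trip * nthreads + threadid) * chunk, clipped to
   the total count n; the return value is false once no work remains.
   For n = 10, nthreads = 2, chunk = 3: thread 0 runs [0,3) then
   [6,9), thread 1 runs [3,6) then [9,10).  */

static bool
static_chunk_range_sketch (int n, int nthreads, int threadid,
                           int chunk, int trip, int *s0, int *e0)
{
  *s0 = (trip * nthreads + threadid) * chunk;
  *e0 = (*s0 + chunk < n) ? *s0 + chunk : n;  /* e0 = min (s0 + chunk, n) */
  return *s0 < n;
}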
6325 static void
6326 expand_omp_for_static_chunk (struct omp_region *region,
6327 struct omp_for_data *fd, gimple inner_stmt)
6329 tree n, s0, e0, e, t;
6330 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6331 tree type, itype, vmain, vback, vextra;
6332 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6333 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6334 gimple_stmt_iterator gsi;
6335 gimple stmt;
6336 edge se;
6337 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6338 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6339 bool broken_loop = region->cont == NULL;
6340 tree *counts = NULL;
6341 tree n1, n2, step;
6343 itype = type = TREE_TYPE (fd->loop.v);
6344 if (POINTER_TYPE_P (type))
6345 itype = signed_type_for (type);
6347 entry_bb = region->entry;
6348 se = split_block (entry_bb, last_stmt (entry_bb));
6349 entry_bb = se->src;
6350 iter_part_bb = se->dest;
6351 cont_bb = region->cont;
6352 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6353 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6354 gcc_assert (broken_loop
6355 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6356 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6357 body_bb = single_succ (seq_start_bb);
6358 if (!broken_loop)
6360 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6361 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6362 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6364 exit_bb = region->exit;
6366 /* Trip and adjustment setup goes in ENTRY_BB. */
6367 gsi = gsi_last_bb (entry_bb);
6368 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6370 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6372 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6373 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6376 if (fd->collapse > 1)
6378 int first_zero_iter = -1;
6379 basic_block l2_dom_bb = NULL;
6381 counts = XALLOCAVEC (tree, fd->collapse);
6382 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6383 fin_bb, first_zero_iter,
6384 l2_dom_bb);
6385 t = NULL_TREE;
6387 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6388 t = integer_one_node;
6389 else
6390 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6391 fold_convert (type, fd->loop.n1),
6392 fold_convert (type, fd->loop.n2));
6393 if (fd->collapse == 1
6394 && TYPE_UNSIGNED (type)
6395 && (t == NULL_TREE || !integer_onep (t)))
6397 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6398 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6399 true, GSI_SAME_STMT);
6400 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6401 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6402 true, GSI_SAME_STMT);
6403 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6404 NULL_TREE, NULL_TREE);
6405 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6406 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6407 expand_omp_regimplify_p, NULL, NULL)
6408 || walk_tree (gimple_cond_rhs_ptr (stmt),
6409 expand_omp_regimplify_p, NULL, NULL))
6411 gsi = gsi_for_stmt (stmt);
6412 gimple_regimplify_operands (stmt, &gsi);
6414 se = split_block (entry_bb, stmt);
6415 se->flags = EDGE_TRUE_VALUE;
6416 entry_bb = se->dest;
6417 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6418 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6419 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6420 if (gimple_in_ssa_p (cfun))
6422 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6423 for (gsi = gsi_start_phis (fin_bb);
6424 !gsi_end_p (gsi); gsi_next (&gsi))
6426 gimple phi = gsi_stmt (gsi);
6427 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6428 se, UNKNOWN_LOCATION);
6431 gsi = gsi_last_bb (entry_bb);
6434 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6435 t = fold_convert (itype, t);
6436 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6437 true, GSI_SAME_STMT);
6439 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6440 t = fold_convert (itype, t);
6441 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6442 true, GSI_SAME_STMT);
6444 n1 = fd->loop.n1;
6445 n2 = fd->loop.n2;
6446 step = fd->loop.step;
6447 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6449 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6450 OMP_CLAUSE__LOOPTEMP_);
6451 gcc_assert (innerc);
6452 n1 = OMP_CLAUSE_DECL (innerc);
6453 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6454 OMP_CLAUSE__LOOPTEMP_);
6455 gcc_assert (innerc);
6456 n2 = OMP_CLAUSE_DECL (innerc);
6458 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6459 true, NULL_TREE, true, GSI_SAME_STMT);
6460 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6461 true, NULL_TREE, true, GSI_SAME_STMT);
6462 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6463 true, NULL_TREE, true, GSI_SAME_STMT);
6464 fd->chunk_size
6465 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
6466 true, NULL_TREE, true, GSI_SAME_STMT);
6468 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6469 t = fold_build2 (PLUS_EXPR, itype, step, t);
6470 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6471 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6472 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6473 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6474 fold_build1 (NEGATE_EXPR, itype, t),
6475 fold_build1 (NEGATE_EXPR, itype, step));
6476 else
6477 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6478 t = fold_convert (itype, t);
6479 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6480 true, GSI_SAME_STMT);
6482 trip_var = create_tmp_reg (itype, ".trip");
6483 if (gimple_in_ssa_p (cfun))
6485 trip_init = make_ssa_name (trip_var, NULL);
6486 trip_main = make_ssa_name (trip_var, NULL);
6487 trip_back = make_ssa_name (trip_var, NULL);
6489 else
6491 trip_init = trip_var;
6492 trip_main = trip_var;
6493 trip_back = trip_var;
6496 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
6497 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6499 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6500 t = fold_build2 (MULT_EXPR, itype, t, step);
6501 if (POINTER_TYPE_P (type))
6502 t = fold_build_pointer_plus (n1, t);
6503 else
6504 t = fold_build2 (PLUS_EXPR, type, t, n1);
6505 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6506 true, GSI_SAME_STMT);
6508 /* Remove the GIMPLE_OMP_FOR. */
6509 gsi_remove (&gsi, true);
6511 /* Iteration space partitioning goes in ITER_PART_BB. */
6512 gsi = gsi_last_bb (iter_part_bb);
6514 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6515 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6516 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6517 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6518 false, GSI_CONTINUE_LINKING);
6520 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6521 t = fold_build2 (MIN_EXPR, itype, t, n);
6522 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6523 false, GSI_CONTINUE_LINKING);
6525 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6526 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6528 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6529 gsi = gsi_start_bb (seq_start_bb);
6531 tree startvar = fd->loop.v;
6532 tree endvar = NULL_TREE;
6534 if (gimple_omp_for_combined_p (fd->for_stmt))
6536 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6537 ? gimple_omp_parallel_clauses (inner_stmt)
6538 : gimple_omp_for_clauses (inner_stmt);
6539 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6540 gcc_assert (innerc);
6541 startvar = OMP_CLAUSE_DECL (innerc);
6542 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6543 OMP_CLAUSE__LOOPTEMP_);
6544 gcc_assert (innerc);
6545 endvar = OMP_CLAUSE_DECL (innerc);
6548 t = fold_convert (itype, s0);
6549 t = fold_build2 (MULT_EXPR, itype, t, step);
6550 if (POINTER_TYPE_P (type))
6551 t = fold_build_pointer_plus (n1, t);
6552 else
6553 t = fold_build2 (PLUS_EXPR, type, t, n1);
6554 t = fold_convert (TREE_TYPE (startvar), t);
6555 t = force_gimple_operand_gsi (&gsi, t,
6556 DECL_P (startvar)
6557 && TREE_ADDRESSABLE (startvar),
6558 NULL_TREE, false, GSI_CONTINUE_LINKING);
6559 stmt = gimple_build_assign (startvar, t);
6560 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6562 t = fold_convert (itype, e0);
6563 t = fold_build2 (MULT_EXPR, itype, t, step);
6564 if (POINTER_TYPE_P (type))
6565 t = fold_build_pointer_plus (n1, t);
6566 else
6567 t = fold_build2 (PLUS_EXPR, type, t, n1);
6568 t = fold_convert (TREE_TYPE (startvar), t);
6569 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6570 false, GSI_CONTINUE_LINKING);
6571 if (endvar)
6573 stmt = gimple_build_assign (endvar, e);
6574 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6575 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6576 stmt = gimple_build_assign (fd->loop.v, e);
6577 else
6578 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6579 NULL_TREE);
6580 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6582 if (fd->collapse > 1)
6583 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6585 if (!broken_loop)
6587 /* The code controlling the sequential loop goes in CONT_BB,
6588 replacing the GIMPLE_OMP_CONTINUE. */
6589 gsi = gsi_last_bb (cont_bb);
6590 stmt = gsi_stmt (gsi);
6591 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6592 vmain = gimple_omp_continue_control_use (stmt);
6593 vback = gimple_omp_continue_control_def (stmt);
6595 if (!gimple_omp_for_combined_p (fd->for_stmt))
6597 if (POINTER_TYPE_P (type))
6598 t = fold_build_pointer_plus (vmain, step);
6599 else
6600 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6601 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
6602 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6603 true, GSI_SAME_STMT);
6604 stmt = gimple_build_assign (vback, t);
6605 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6607 t = build2 (fd->loop.cond_code, boolean_type_node,
6608 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6609 ? t : vback, e);
6610 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6613 /* Remove GIMPLE_OMP_CONTINUE. */
6614 gsi_remove (&gsi, true);
6616 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6617 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6619 /* Trip update code goes into TRIP_UPDATE_BB. */
6620 gsi = gsi_start_bb (trip_update_bb);
6622 t = build_int_cst (itype, 1);
6623 t = build2 (PLUS_EXPR, itype, trip_main, t);
6624 stmt = gimple_build_assign (trip_back, t);
6625 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6628 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6629 gsi = gsi_last_bb (exit_bb);
6630 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6632 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6633 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6635 gsi_remove (&gsi, true);
6637 /* Connect the new blocks. */
6638 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6639 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6641 if (!broken_loop)
6643 se = find_edge (cont_bb, body_bb);
6644 if (gimple_omp_for_combined_p (fd->for_stmt))
6646 remove_edge (se);
6647 se = NULL;
6649 else if (fd->collapse > 1)
6651 remove_edge (se);
6652 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6654 else
6655 se->flags = EDGE_TRUE_VALUE;
6656 find_edge (cont_bb, trip_update_bb)->flags
6657 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6659 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6662 if (gimple_in_ssa_p (cfun))
6664 gimple_stmt_iterator psi;
6665 gimple phi;
6666 edge re, ene;
6667 edge_var_map *vm;
6668 size_t i;
6670 gcc_assert (fd->collapse == 1 && !broken_loop);
6672 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6673 remove arguments of the phi nodes in fin_bb. We need to create
6674 appropriate phi nodes in iter_part_bb instead. */
6675 se = single_pred_edge (fin_bb);
6676 re = single_succ_edge (trip_update_bb);
6677 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
6678 ene = single_succ_edge (entry_bb);
6680 psi = gsi_start_phis (fin_bb);
6681 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6682 gsi_next (&psi), ++i)
6684 gimple nphi;
6685 source_location locus;
6687 phi = gsi_stmt (psi);
6688 t = gimple_phi_result (phi);
6689 gcc_assert (t == redirect_edge_var_map_result (vm));
6690 nphi = create_phi_node (t, iter_part_bb);
6692 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6693 locus = gimple_phi_arg_location_from_edge (phi, se);
6695 /* A special case -- fd->loop.v is not yet computed in
6696 iter_part_bb; we need to use vextra instead. */
6697 if (t == fd->loop.v)
6698 t = vextra;
6699 add_phi_arg (nphi, t, ene, locus);
6700 locus = redirect_edge_var_map_location (vm);
6701 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6703 gcc_assert (gsi_end_p (psi) && i == head->length ());
6704 redirect_edge_var_map_clear (re);
6705 while (1)
6707 psi = gsi_start_phis (fin_bb);
6708 if (gsi_end_p (psi))
6709 break;
6710 remove_phi_node (&psi, false);
6713 /* Make phi node for trip. */
6714 phi = create_phi_node (trip_main, iter_part_bb);
6715 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6716 UNKNOWN_LOCATION);
6717 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6718 UNKNOWN_LOCATION);
6721 if (!broken_loop)
6722 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6723 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6724 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6725 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6726 recompute_dominator (CDI_DOMINATORS, fin_bb));
6727 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6728 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6729 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6730 recompute_dominator (CDI_DOMINATORS, body_bb));
6732 if (!broken_loop)
6734 struct loop *trip_loop = alloc_loop ();
6735 trip_loop->header = iter_part_bb;
6736 trip_loop->latch = trip_update_bb;
6737 add_loop (trip_loop, iter_part_bb->loop_father);
6739 if (!gimple_omp_for_combined_p (fd->for_stmt))
6741 struct loop *loop = alloc_loop ();
6742 loop->header = body_bb;
6743 if (collapse_bb == NULL)
6744 loop->latch = cont_bb;
6745 add_loop (loop, trip_loop);
6750 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
6751 Given parameters:
6752 for (V = N1; V cond N2; V += STEP) BODY;
6754 where COND is "<" or ">" or "!=", we generate pseudocode
6756 for (ind_var = low; ind_var < high; ind_var++)
6757 {
6758 V = n1 + (ind_var * STEP)
6760 <BODY>
6761 }
6763 In the above pseudocode, low and high are function parameters of the
6764 child function. In the function below, we insert a temporary
6765 variable that makes calls to two OMP functions that will not be
6766 found in the body of _Cilk_for (since OMP_FOR cannot be mixed
6767 with _Cilk_for). These functions are replaced with low and high
6768 by the function that handles taskreg. */
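/* Illustrative sketch (not part of GCC): the shape of the child
   function body that this expansion arranges.  The parameter order
   and types of the child function, and the name "data", are
   assumptions here; only the __low/__high names come from the code
   below.  The runtime hands each worker a [__low, __high) subrange,
   and the loop variable V is recomputed from the induction variable
   on every iteration.  */

static void
cilk_child_shape_sketch (void *data, long long __low, long long __high)
{
  long long n1 = 0, step = 1;  /* assumed N1 and STEP */
  for (long long ind_var = __low; ind_var < __high; ind_var++)
    {
      long long v = n1 + ind_var * step;  /* V = N1 + ind_var * STEP */
      (void) v;                           /* <BODY> would use V.  */
    }
  (void) data;
}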
6771 static void
6772 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
6774 bool broken_loop = region->cont == NULL;
6775 basic_block entry_bb = region->entry;
6776 basic_block cont_bb = region->cont;
6778 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6779 gcc_assert (broken_loop
6780 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6781 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6782 basic_block l1_bb, l2_bb;
6784 if (!broken_loop)
6786 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6787 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6788 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6789 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6791 else
6793 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6794 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6795 l2_bb = single_succ (l1_bb);
6797 basic_block exit_bb = region->exit;
6798 basic_block l2_dom_bb = NULL;
6800 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
6802 /* The statements below, up to "tree high_val = ...", are pseudo
6803 statements used to pass information to expand_omp_taskreg.
6804 low_val and high_val will be replaced by the __low and __high
6805 parameters from the child function.
6807 The call_exprs part is a place-holder; it is mainly used
6808 to identify distinctly to the top-level part that this is
6809 where we should put low and high (reasoning given in the header
6810 comment). */
6812 tree child_fndecl
6813 = gimple_omp_parallel_child_fn (last_stmt (region->outer->entry));
6814 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
6815 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
6817 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
6818 high_val = t;
6819 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
6820 low_val = t;
6822 gcc_assert (low_val && high_val);
6824 tree type = TREE_TYPE (low_val);
6825 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
6826 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6828 /* Not needed in SSA form right now. */
6829 gcc_assert (!gimple_in_ssa_p (cfun));
6830 if (l2_dom_bb == NULL)
6831 l2_dom_bb = l1_bb;
6833 tree n1 = low_val;
6834 tree n2 = high_val;
6836 gimple stmt = gimple_build_assign (ind_var, n1);
6838 /* Replace the GIMPLE_OMP_FOR statement. */
6839 gsi_replace (&gsi, stmt, true);
6841 if (!broken_loop)
6843 /* Code to control the increment goes in the CONT_BB. */
6844 gsi = gsi_last_bb (cont_bb);
6845 stmt = gsi_stmt (gsi);
6846 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6847 stmt = gimple_build_assign_with_ops (PLUS_EXPR, ind_var, ind_var,
6848 build_one_cst (type));
6850 /* Replace GIMPLE_OMP_CONTINUE. */
6851 gsi_replace (&gsi, stmt, true);
6854 /* Emit the condition in L1_BB. */
6855 gsi = gsi_after_labels (l1_bb);
6856 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
6857 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
6858 fd->loop.step);
6859 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
6860 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6861 fd->loop.n1, fold_convert (sizetype, t));
6862 else
6863 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6864 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
6865 t = fold_convert (TREE_TYPE (fd->loop.v), t);
6866 expand_omp_build_assign (&gsi, fd->loop.v, t);
6868 /* The condition is always '<' since the runtime will fill in the low
6869 and high values. */
6870 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
6871 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6873 /* Remove GIMPLE_OMP_RETURN. */
6874 gsi = gsi_last_bb (exit_bb);
6875 gsi_remove (&gsi, true);
6877 /* Connect the new blocks. */
6878 remove_edge (FALLTHRU_EDGE (entry_bb));
6880 edge e, ne;
6881 if (!broken_loop)
6883 remove_edge (BRANCH_EDGE (entry_bb));
6884 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6886 e = BRANCH_EDGE (l1_bb);
6887 ne = FALLTHRU_EDGE (l1_bb);
6888 e->flags = EDGE_TRUE_VALUE;
6890 else
6892 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6894 ne = single_succ_edge (l1_bb);
6895 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6898 ne->flags = EDGE_FALSE_VALUE;
6899 e->probability = REG_BR_PROB_BASE * 7 / 8;
6900 ne->probability = REG_BR_PROB_BASE / 8;
6902 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6903 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6904 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6906 if (!broken_loop)
6908 struct loop *loop = alloc_loop ();
6909 loop->header = l1_bb;
6910 loop->latch = cont_bb;
6911 add_loop (loop, l1_bb->loop_father);
6912 loop->safelen = INT_MAX;
6915 /* Pick the correct library function based on the precision of the
6916 induction variable type. */
6917 tree lib_fun = NULL_TREE;
6918 if (TYPE_PRECISION (type) == 32)
6919 lib_fun = cilk_for_32_fndecl;
6920 else if (TYPE_PRECISION (type) == 64)
6921 lib_fun = cilk_for_64_fndecl;
6922 else
6923 gcc_unreachable ();
6925 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
6927 /* WS_ARGS contains the library function flavor to call
6928 (__libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32) and the
6929 user-defined grain value. If the user does not define one, then zero
6930 is passed in by the parser. */
6931 vec_alloc (region->ws_args, 2);
6932 region->ws_args->quick_push (lib_fun);
6933 region->ws_args->quick_push (fd->chunk_size);
6936 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
6937 loop. Given parameters:
6939 for (V = N1; V cond N2; V += STEP) BODY;
6941 where COND is "<" or ">", we generate pseudocode
6943 V = N1;
6944 goto L1;
6945 L0:
6946 BODY;
6947 V += STEP;
6948 L1:
6949 if (V cond N2) goto L0; else goto L2;
6950 L2:
6952 For collapsed loops, given parameters:
6953 collapse(3)
6954 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6955 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6956 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6957 BODY;
6959 we generate pseudocode
6961 if (cond3 is <)
6962 adj = STEP3 - 1;
6963 else
6964 adj = STEP3 + 1;
6965 count3 = (adj + N32 - N31) / STEP3;
6966 if (cond2 is <)
6967 adj = STEP2 - 1;
6968 else
6969 adj = STEP2 + 1;
6970 count2 = (adj + N22 - N21) / STEP2;
6971 if (cond1 is <)
6972 adj = STEP1 - 1;
6973 else
6974 adj = STEP1 + 1;
6975 count1 = (adj + N12 - N11) / STEP1;
6976 count = count1 * count2 * count3;
6977 V = 0;
6978 V1 = N11;
6979 V2 = N21;
6980 V3 = N31;
6981 goto L1;
6982 L0:
6983 BODY;
6984 V += 1;
6985 V3 += STEP3;
6986 V2 += (V3 cond3 N32) ? 0 : STEP2;
6987 V3 = (V3 cond3 N32) ? V3 : N31;
6988 V1 += (V2 cond2 N22) ? 0 : STEP1;
6989 V2 = (V2 cond2 N22) ? V2 : N21;
6990 L1:
6991 if (V < count) goto L0; else goto L2;
6992 L2:
6994 */
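/* Illustrative sketch (not part of GCC): the induction-variable
   bookkeeping from the collapsed pseudocode above, reduced to two
   levels with assumed bounds count1 = 3 and count2 = 4.  The linear
   counter v makes count1 * count2 trips; the inner variable advances
   on every trip, and whenever it runs off its bound the outer
   variable advances and the inner one wraps, matching the
   conditional updates above.  */

static void
collapsed_iterate_sketch (void)
{
  int v1 = 0, v2 = 0;                /* V1 = N11, V2 = N21 */
  for (long v = 0; v < 3 * 4; v++)   /* count = count1 * count2 */
    {
      /* BODY would use v1 and v2 here: visits (0,0), (0,1), ...  */
      v2 += 1;                /* V2 += STEP2 */
      v1 += (v2 < 4) ? 0 : 1; /* V1 advances when V2 wraps */
      v2 = (v2 < 4) ? v2 : 0; /* V2 wraps back to N21 */
    }
}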
6996 static void
6997 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
6999 tree type, t;
7000 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7001 gimple_stmt_iterator gsi;
7002 gimple stmt;
7003 bool broken_loop = region->cont == NULL;
7004 edge e, ne;
7005 tree *counts = NULL;
7006 int i;
7007 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7008 OMP_CLAUSE_SAFELEN);
7009 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7010 OMP_CLAUSE__SIMDUID_);
7011 tree n1, n2;
7013 type = TREE_TYPE (fd->loop.v);
7014 entry_bb = region->entry;
7015 cont_bb = region->cont;
7016 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7017 gcc_assert (broken_loop
7018 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7019 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7020 if (!broken_loop)
7022 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7023 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7024 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7025 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7027 else
7029 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7030 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7031 l2_bb = single_succ (l1_bb);
7033 exit_bb = region->exit;
7034 l2_dom_bb = NULL;
7036 gsi = gsi_last_bb (entry_bb);
7038 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7039 /* Not needed in SSA form right now. */
7040 gcc_assert (!gimple_in_ssa_p (cfun));
7041 if (fd->collapse > 1)
7043 int first_zero_iter = -1;
7044 basic_block zero_iter_bb = l2_bb;
7046 counts = XALLOCAVEC (tree, fd->collapse);
7047 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7048 zero_iter_bb, first_zero_iter,
7049 l2_dom_bb);
7051 if (l2_dom_bb == NULL)
7052 l2_dom_bb = l1_bb;
7054 n1 = fd->loop.n1;
7055 n2 = fd->loop.n2;
7056 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7058 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7059 OMP_CLAUSE__LOOPTEMP_);
7060 gcc_assert (innerc);
7061 n1 = OMP_CLAUSE_DECL (innerc);
7062 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7063 OMP_CLAUSE__LOOPTEMP_);
7064 gcc_assert (innerc);
7065 n2 = OMP_CLAUSE_DECL (innerc);
7066 expand_omp_build_assign (&gsi, fd->loop.v,
7067 fold_convert (type, n1));
7068 if (fd->collapse > 1)
7070 gsi_prev (&gsi);
7071 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7072 gsi_next (&gsi);
7075 else
7077 expand_omp_build_assign (&gsi, fd->loop.v,
7078 fold_convert (type, fd->loop.n1));
7079 if (fd->collapse > 1)
7080 for (i = 0; i < fd->collapse; i++)
7082 tree itype = TREE_TYPE (fd->loops[i].v);
7083 if (POINTER_TYPE_P (itype))
7084 itype = signed_type_for (itype);
7085 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7086 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7090 /* Remove the GIMPLE_OMP_FOR statement. */
7091 gsi_remove (&gsi, true);
7093 if (!broken_loop)
7095 /* Code to control the increment goes in the CONT_BB. */
7096 gsi = gsi_last_bb (cont_bb);
7097 stmt = gsi_stmt (gsi);
7098 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7100 if (POINTER_TYPE_P (type))
7101 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7102 else
7103 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7104 expand_omp_build_assign (&gsi, fd->loop.v, t);
7106 if (fd->collapse > 1)
7108 i = fd->collapse - 1;
7109 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7111 t = fold_convert (sizetype, fd->loops[i].step);
7112 t = fold_build_pointer_plus (fd->loops[i].v, t);
7114 else
7116 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7117 fd->loops[i].step);
7118 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7119 fd->loops[i].v, t);
7121 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7123 for (i = fd->collapse - 1; i > 0; i--)
7125 tree itype = TREE_TYPE (fd->loops[i].v);
7126 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7127 if (POINTER_TYPE_P (itype2))
7128 itype2 = signed_type_for (itype2);
7129 t = build3 (COND_EXPR, itype2,
7130 build2 (fd->loops[i].cond_code, boolean_type_node,
7131 fd->loops[i].v,
7132 fold_convert (itype, fd->loops[i].n2)),
7133 build_int_cst (itype2, 0),
7134 fold_convert (itype2, fd->loops[i - 1].step));
7135 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7136 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7137 else
7138 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7139 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7141 t = build3 (COND_EXPR, itype,
7142 build2 (fd->loops[i].cond_code, boolean_type_node,
7143 fd->loops[i].v,
7144 fold_convert (itype, fd->loops[i].n2)),
7145 fd->loops[i].v,
7146 fold_convert (itype, fd->loops[i].n1));
7147 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7151 /* Remove GIMPLE_OMP_CONTINUE. */
7152 gsi_remove (&gsi, true);
7155 /* Emit the condition in L1_BB. */
7156 gsi = gsi_start_bb (l1_bb);
7158 t = fold_convert (type, n2);
7159 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7160 false, GSI_CONTINUE_LINKING);
7161 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7162 stmt = gimple_build_cond_empty (t);
7163 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7164 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
7165 NULL, NULL)
7166 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
7167 NULL, NULL))
7169 gsi = gsi_for_stmt (stmt);
7170 gimple_regimplify_operands (stmt, &gsi);
7173 /* Remove GIMPLE_OMP_RETURN. */
7174 gsi = gsi_last_bb (exit_bb);
7175 gsi_remove (&gsi, true);
7177 /* Connect the new blocks. */
7178 remove_edge (FALLTHRU_EDGE (entry_bb));
7180 if (!broken_loop)
7182 remove_edge (BRANCH_EDGE (entry_bb));
7183 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7185 e = BRANCH_EDGE (l1_bb);
7186 ne = FALLTHRU_EDGE (l1_bb);
7187 e->flags = EDGE_TRUE_VALUE;
7189 else
7191 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7193 ne = single_succ_edge (l1_bb);
7194 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7197 ne->flags = EDGE_FALSE_VALUE;
7198 e->probability = REG_BR_PROB_BASE * 7 / 8;
7199 ne->probability = REG_BR_PROB_BASE / 8;
7201 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7202 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7203 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7205 if (!broken_loop)
7207 struct loop *loop = alloc_loop ();
7208 loop->header = l1_bb;
7209 loop->latch = cont_bb;
7210 add_loop (loop, l1_bb->loop_father);
7211 if (safelen == NULL_TREE)
7212 loop->safelen = INT_MAX;
7213 else
7215 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7216 if (TREE_CODE (safelen) != INTEGER_CST)
7217 loop->safelen = 0;
7218 else if (!tree_fits_uhwi_p (safelen)
7219 || tree_to_uhwi (safelen) > INT_MAX)
7220 loop->safelen = INT_MAX;
7221 else
7222 loop->safelen = tree_to_uhwi (safelen);
7223 if (loop->safelen == 1)
7224 loop->safelen = 0;
7226 if (simduid)
7228 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7229 cfun->has_simduid_loops = true;
7231 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7232 the loop. */
7233 if ((flag_tree_loop_vectorize
7234 || (!global_options_set.x_flag_tree_loop_vectorize
7235 && !global_options_set.x_flag_tree_vectorize))
7236 && flag_tree_loop_optimize
7237 && loop->safelen > 1)
7239 loop->force_vectorize = true;
7240 cfun->has_force_vectorize_loops = true;
7246 /* Expand the OpenMP loop defined by REGION. */
7248 static void
7249 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7251 struct omp_for_data fd;
7252 struct omp_for_data_loop *loops;
7254 loops
7255 = (struct omp_for_data_loop *)
7256 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7257 * sizeof (struct omp_for_data_loop));
7258 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
7259 region->sched_kind = fd.sched_kind;
7261 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7262 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7263 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7264 if (region->cont)
7266 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7267 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7268 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7270 else
7271 /* If there isn't a continue then this is a degenerate case where
7272 the introduction of abnormal edges during lowering will prevent
7273 original loops from being detected. Fix that up. */
7274 loops_state_set (LOOPS_NEED_FIXUP);
7276 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7277 expand_omp_simd (region, &fd);
7278 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7279 expand_cilk_for (region, &fd);
7280 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7281 && !fd.have_ordered)
7283 if (fd.chunk_size == NULL)
7284 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7285 else
7286 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7288 else
7290 int fn_index, start_ix, next_ix;
7292 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7293 == GF_OMP_FOR_KIND_FOR);
7294 if (fd.chunk_size == NULL
7295 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7296 fd.chunk_size = integer_zero_node;
7297 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
7298 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7299 ? 3 : fd.sched_kind;
7300 fn_index += fd.have_ordered * 4;
7301 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7302 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
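/* The GOMP_LOOP_*_START (and *_NEXT) builtins are declared in
   schedule order -- static, dynamic, guided, runtime, then the four
   ordered variants -- which is what the index arithmetic above
   relies on; e.g. schedule(dynamic) selects
   BUILT_IN_GOMP_LOOP_DYNAMIC_START, and schedule(runtime) with
   ordered selects BUILT_IN_GOMP_LOOP_ORDERED_RUNTIME_START.  */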
7303 if (fd.iter_type == long_long_unsigned_type_node)
7305 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7306 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7307 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7308 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7310 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7311 (enum built_in_function) next_ix, inner_stmt);
7314 if (gimple_in_ssa_p (cfun))
7315 update_ssa (TODO_update_ssa_only_virtuals);
7319 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7321 v = GOMP_sections_start (n);
7322 L0:
7323 switch (v)
7324 {
7325 case 0:
7326 goto L2;
7327 case 1:
7328 section 1;
7329 goto L1;
7330 case 2:
7331 ...
7332 case n:
7333 ...
7334 default:
7335 abort ();
7336 }
7337 L1:
7338 v = GOMP_sections_next ();
7339 goto L0;
7340 L2:
7341 reduction;
7343 If this is a combined parallel sections, replace the call to
7344 GOMP_sections_start with a call to GOMP_sections_next. */
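/* Illustrative sketch (not part of GCC): the dispatch loop described
   above, with the libgomp calls stubbed out so the sketch is
   self-contained; two section bodies are assumed.  With the real
   runtime, v comes from GOMP_sections_start/GOMP_sections_next and
   sections may run on different threads; the stub simply walks them
   in order.  */

static void
sections_shape_sketch (unsigned nsections)
{
  unsigned v = nsections ? 1 : 0;  /* v = GOMP_sections_start (n); */
  while (v != 0)                   /* case 0 is the exit (L2).  */
    {
      switch (v)
        {
        case 1:
          /* section 1 body */
          break;
        case 2:
          /* section 2 body */
          break;
        default:
          __builtin_abort ();      /* corrupted section number */
        }
      v = (v < nsections) ? v + 1 : 0;  /* v = GOMP_sections_next (); */
    }
}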
7346 static void
7347 expand_omp_sections (struct omp_region *region)
7349 tree t, u, vin = NULL, vmain, vnext, l2;
7350 unsigned len;
7351 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7352 gimple_stmt_iterator si, switch_si;
7353 gimple sections_stmt, stmt, cont;
7354 edge_iterator ei;
7355 edge e;
7356 struct omp_region *inner;
7357 unsigned i, casei;
7358 bool exit_reachable = region->cont != NULL;
7360 gcc_assert (region->exit != NULL);
7361 entry_bb = region->entry;
7362 l0_bb = single_succ (entry_bb);
7363 l1_bb = region->cont;
7364 l2_bb = region->exit;
7365 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7366 l2 = gimple_block_label (l2_bb);
7367 else
7369 /* This can happen if there are reductions. */
7370 len = EDGE_COUNT (l0_bb->succs);
7371 gcc_assert (len > 0);
7372 e = EDGE_SUCC (l0_bb, len - 1);
7373 si = gsi_last_bb (e->dest);
7374 l2 = NULL_TREE;
7375 if (gsi_end_p (si)
7376 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7377 l2 = gimple_block_label (e->dest);
7378 else
7379 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7381 si = gsi_last_bb (e->dest);
7382 if (gsi_end_p (si)
7383 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7385 l2 = gimple_block_label (e->dest);
7386 break;
7390 if (exit_reachable)
7391 default_bb = create_empty_bb (l1_bb->prev_bb);
7392 else
7393 default_bb = create_empty_bb (l0_bb);
7395 /* We will build a switch() with enough cases for all the
7396 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
7397 and a default case to abort if something goes wrong. */
7398 len = EDGE_COUNT (l0_bb->succs);
7400 /* Use vec::quick_push on label_vec throughout, since we know the size
7401 in advance. */
7402 auto_vec<tree> label_vec (len);
7404 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7405 GIMPLE_OMP_SECTIONS statement. */
7406 si = gsi_last_bb (entry_bb);
7407 sections_stmt = gsi_stmt (si);
7408 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7409 vin = gimple_omp_sections_control (sections_stmt);
7410 if (!is_combined_parallel (region))
7412 /* If we are not inside a combined parallel+sections region,
7413 call GOMP_sections_start. */
7414 t = build_int_cst (unsigned_type_node, len - 1);
7415 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7416 stmt = gimple_build_call (u, 1, t);
7418 else
7420 /* Otherwise, call GOMP_sections_next. */
7421 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7422 stmt = gimple_build_call (u, 0);
7424 gimple_call_set_lhs (stmt, vin);
7425 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7426 gsi_remove (&si, true);
7428 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7429 L0_BB. */
7430 switch_si = gsi_last_bb (l0_bb);
7431 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7432 if (exit_reachable)
7434 cont = last_stmt (l1_bb);
7435 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7436 vmain = gimple_omp_continue_control_use (cont);
7437 vnext = gimple_omp_continue_control_def (cont);
7439 else
7441 vmain = vin;
7442 vnext = NULL_TREE;
7445 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7446 label_vec.quick_push (t);
7447 i = 1;
7449 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7450 for (inner = region->inner, casei = 1;
7451 inner;
7452 inner = inner->next, i++, casei++)
7454 basic_block s_entry_bb, s_exit_bb;
7456 /* Skip optional reduction region. */
7457 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7459 --i;
7460 --casei;
7461 continue;
7464 s_entry_bb = inner->entry;
7465 s_exit_bb = inner->exit;
7467 t = gimple_block_label (s_entry_bb);
7468 u = build_int_cst (unsigned_type_node, casei);
7469 u = build_case_label (u, NULL, t);
7470 label_vec.quick_push (u);
7472 si = gsi_last_bb (s_entry_bb);
7473 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7474 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7475 gsi_remove (&si, true);
7476 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7478 if (s_exit_bb == NULL)
7479 continue;
7481 si = gsi_last_bb (s_exit_bb);
7482 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7483 gsi_remove (&si, true);
7485 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7488 /* Error handling code goes in DEFAULT_BB. */
7489 t = gimple_block_label (default_bb);
7490 u = build_case_label (NULL, NULL, t);
7491 make_edge (l0_bb, default_bb, 0);
7492 add_bb_to_loop (default_bb, current_loops->tree_root);
7494 stmt = gimple_build_switch (vmain, u, label_vec);
7495 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7496 gsi_remove (&switch_si, true);
7498 si = gsi_start_bb (default_bb);
7499 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7500 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7502 if (exit_reachable)
7504 tree bfn_decl;
7506 /* Code to get the next section goes in L1_BB. */
7507 si = gsi_last_bb (l1_bb);
7508 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7510 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7511 stmt = gimple_build_call (bfn_decl, 0);
7512 gimple_call_set_lhs (stmt, vnext);
7513 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7514 gsi_remove (&si, true);
7516 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7519 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7520 si = gsi_last_bb (l2_bb);
7521 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7522 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7523 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7524 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7525 else
7526 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7527 stmt = gimple_build_call (t, 0);
7528 if (gimple_omp_return_lhs (gsi_stmt (si)))
7529 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7530 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7531 gsi_remove (&si, true);
7533 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
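/* Illustration added in editing -- a sketch, not from the original
   sources: given a user-level construct such as

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   the expansion above yields, roughly,

     for (i = GOMP_sections_start (2); i != 0; i = GOMP_sections_next ())
       switch (i)
         {
         case 1: foo (); break;
         case 2: bar (); break;
         default: __builtin_trap ();
         }
     GOMP_sections_end ();

   with case 0 (here folded into the loop condition) meaning "no more
   sections".  */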
7537 /* Expand code for an OpenMP single directive. We've already expanded
7538 much of the code, here we simply place the GOMP_barrier call. */
7540 static void
7541 expand_omp_single (struct omp_region *region)
7543 basic_block entry_bb, exit_bb;
7544 gimple_stmt_iterator si;
7546 entry_bb = region->entry;
7547 exit_bb = region->exit;
7549 si = gsi_last_bb (entry_bb);
7550 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7551 gsi_remove (&si, true);
7552 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7554 si = gsi_last_bb (exit_bb);
7555 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7557 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7558 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7560 gsi_remove (&si, true);
7561 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7565 /* Generic expansion for OpenMP synchronization directives: master,
7566 ordered and critical. All we need to do here is remove the entry
7567 and exit markers for REGION. */
7569 static void
7570 expand_omp_synch (struct omp_region *region)
7572 basic_block entry_bb, exit_bb;
7573 gimple_stmt_iterator si;
7575 entry_bb = region->entry;
7576 exit_bb = region->exit;
7578 si = gsi_last_bb (entry_bb);
7579 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7580 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7581 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7582 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7583 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7584 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7585 gsi_remove (&si, true);
7586 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7588 if (exit_bb)
7590 si = gsi_last_bb (exit_bb);
7591 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7592 gsi_remove (&si, true);
7593 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7597 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7598 operation as a normal volatile load. */
7600 static bool
7601 expand_omp_atomic_load (basic_block load_bb, tree addr,
7602 tree loaded_val, int index)
7604 enum built_in_function tmpbase;
7605 gimple_stmt_iterator gsi;
7606 basic_block store_bb;
7607 location_t loc;
7608 gimple stmt;
7609 tree decl, call, type, itype;
7611 gsi = gsi_last_bb (load_bb);
7612 stmt = gsi_stmt (gsi);
7613 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7614 loc = gimple_location (stmt);
7616 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7617 is smaller than word size, then expand_atomic_load assumes that the load
7618 is atomic. We could avoid the builtin entirely in this case. */
7620 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7621 decl = builtin_decl_explicit (tmpbase);
7622 if (decl == NULL_TREE)
7623 return false;
7625 type = TREE_TYPE (loaded_val);
7626 itype = TREE_TYPE (TREE_TYPE (decl));
7628 call = build_call_expr_loc (loc, decl, 2, addr,
7629 build_int_cst (NULL,
7630 gimple_omp_atomic_seq_cst_p (stmt)
7631 ? MEMMODEL_SEQ_CST
7632 : MEMMODEL_RELAXED));
7633 if (!useless_type_conversion_p (type, itype))
7634 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7635 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7637 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7638 gsi_remove (&gsi, true);
7640 store_bb = single_succ (load_bb);
7641 gsi = gsi_last_bb (store_bb);
7642 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7643 gsi_remove (&gsi, true);
7645 if (gimple_in_ssa_p (cfun))
7646 update_ssa (TODO_update_ssa_no_phi);
7648 return true;
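/* Illustration added in editing -- a sketch, not from the original
   sources: for a 4-byte int (INDEX == 2),

     #pragma omp atomic read
     v = x;

   becomes roughly

     v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   or MEMMODEL_SEQ_CST when the seq_cst clause is present.  */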
7651 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7652 operation as a normal volatile store. */
7654 static bool
7655 expand_omp_atomic_store (basic_block load_bb, tree addr,
7656 tree loaded_val, tree stored_val, int index)
7658 enum built_in_function tmpbase;
7659 gimple_stmt_iterator gsi;
7660 basic_block store_bb = single_succ (load_bb);
7661 location_t loc;
7662 gimple stmt;
7663 tree decl, call, type, itype;
7664 enum machine_mode imode;
7665 bool exchange;
7667 gsi = gsi_last_bb (load_bb);
7668 stmt = gsi_stmt (gsi);
7669 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7671 /* If the load value is needed, then this isn't a store but an exchange. */
7672 exchange = gimple_omp_atomic_need_value_p (stmt);
7674 gsi = gsi_last_bb (store_bb);
7675 stmt = gsi_stmt (gsi);
7676 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7677 loc = gimple_location (stmt);
7679 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7680 is smaller than word size, then expand_atomic_store assumes that the store
7681 is atomic. We could avoid the builtin entirely in this case. */
7683 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7684 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7685 decl = builtin_decl_explicit (tmpbase);
7686 if (decl == NULL_TREE)
7687 return false;
7689 type = TREE_TYPE (stored_val);
7691 /* Dig out the type of the function's second argument. */
7692 itype = TREE_TYPE (decl);
7693 itype = TYPE_ARG_TYPES (itype);
7694 itype = TREE_CHAIN (itype);
7695 itype = TREE_VALUE (itype);
7696 imode = TYPE_MODE (itype);
7698 if (exchange && !can_atomic_exchange_p (imode, true))
7699 return false;
7701 if (!useless_type_conversion_p (itype, type))
7702 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7703 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7704 build_int_cst (NULL,
7705 gimple_omp_atomic_seq_cst_p (stmt)
7706 ? MEMMODEL_SEQ_CST
7707 : MEMMODEL_RELAXED));
7708 if (exchange)
7710 if (!useless_type_conversion_p (type, itype))
7711 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7712 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7715 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7716 gsi_remove (&gsi, true);
7718 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7719 gsi = gsi_last_bb (load_bb);
7720 gsi_remove (&gsi, true);
7722 if (gimple_in_ssa_p (cfun))
7723 update_ssa (TODO_update_ssa_no_phi);
7725 return true;
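/* Illustration added in editing -- a sketch, not from the original
   sources: for a 4-byte int,

     #pragma omp atomic write
     x = expr;

   becomes roughly

     __atomic_store_4 (&x, expr, MEMMODEL_RELAXED);

   whereas a capture form that also needs the old value, e.g.

     #pragma omp atomic capture
     { v = x; x = expr; }

   is emitted as an exchange instead:

     v = __atomic_exchange_4 (&x, expr, MEMMODEL_RELAXED);  */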
7728 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7729 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7730 size of the data type, and thus usable to find the index of the builtin
7731 decl. Returns false if the expression is not of the proper form. */
7733 static bool
7734 expand_omp_atomic_fetch_op (basic_block load_bb,
7735 tree addr, tree loaded_val,
7736 tree stored_val, int index)
7738 enum built_in_function oldbase, newbase, tmpbase;
7739 tree decl, itype, call;
7740 tree lhs, rhs;
7741 basic_block store_bb = single_succ (load_bb);
7742 gimple_stmt_iterator gsi;
7743 gimple stmt;
7744 location_t loc;
7745 enum tree_code code;
7746 bool need_old, need_new;
7747 enum machine_mode imode;
7748 bool seq_cst;
7750 /* We expect to find the following sequences:
7752 load_bb:
7753 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7755 store_bb:
7756 val = tmp OP something; (or: something OP tmp)
7757 GIMPLE_OMP_ATOMIC_STORE (val)
7759 ???FIXME: Allow a more flexible sequence.
7760 Perhaps use data flow to pick the statements. */
7764 gsi = gsi_after_labels (store_bb);
7765 stmt = gsi_stmt (gsi);
7766 loc = gimple_location (stmt);
7767 if (!is_gimple_assign (stmt))
7768 return false;
7769 gsi_next (&gsi);
7770 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7771 return false;
7772 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7773 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7774 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7775 gcc_checking_assert (!need_old || !need_new);
7777 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7778 return false;
7780 /* Check for one of the supported fetch-op operations. */
7781 code = gimple_assign_rhs_code (stmt);
7782 switch (code)
7784 case PLUS_EXPR:
7785 case POINTER_PLUS_EXPR:
7786 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7787 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7788 break;
7789 case MINUS_EXPR:
7790 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7791 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7792 break;
7793 case BIT_AND_EXPR:
7794 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7795 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7796 break;
7797 case BIT_IOR_EXPR:
7798 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7799 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7800 break;
7801 case BIT_XOR_EXPR:
7802 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7803 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7804 break;
7805 default:
7806 return false;
7809 /* Make sure the expression is of the proper form. */
7810 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7811 rhs = gimple_assign_rhs2 (stmt);
7812 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7813 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7814 rhs = gimple_assign_rhs1 (stmt);
7815 else
7816 return false;
7818 tmpbase = ((enum built_in_function)
7819 ((need_new ? newbase : oldbase) + index + 1));
7820 decl = builtin_decl_explicit (tmpbase);
7821 if (decl == NULL_TREE)
7822 return false;
7823 itype = TREE_TYPE (TREE_TYPE (decl));
7824 imode = TYPE_MODE (itype);
7826 /* We could test all of the various optabs involved, but the fact of the
7827 matter is that (with the exception of i486 vs i586 and xadd) all targets
7828 that support any atomic operation optab also implement compare-and-swap.
7829 Let optabs.c take care of expanding any compare-and-swap loop. */
7830 if (!can_compare_and_swap_p (imode, true))
7831 return false;
7833 gsi = gsi_last_bb (load_bb);
7834 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7836 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7837 It only requires that the operation happen atomically. Thus we can
7838 use the RELAXED memory model. */
7839 call = build_call_expr_loc (loc, decl, 3, addr,
7840 fold_convert_loc (loc, itype, rhs),
7841 build_int_cst (NULL,
7842 seq_cst ? MEMMODEL_SEQ_CST
7843 : MEMMODEL_RELAXED));
7845 if (need_old || need_new)
7847 lhs = need_old ? loaded_val : stored_val;
7848 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7849 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7851 else
7852 call = fold_convert_loc (loc, void_type_node, call);
7853 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7854 gsi_remove (&gsi, true);
7856 gsi = gsi_last_bb (store_bb);
7857 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7858 gsi_remove (&gsi, true);
7859 gsi = gsi_last_bb (store_bb);
7860 gsi_remove (&gsi, true);
7862 if (gimple_in_ssa_p (cfun))
7863 update_ssa (TODO_update_ssa_no_phi);
7865 return true;
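/* Illustration added in editing -- a sketch, not from the original
   sources: for a 4-byte int,

     #pragma omp atomic
     x += n;

   matches the PLUS_EXPR case above and becomes roughly

     __atomic_fetch_add_4 (&x, n, MEMMODEL_RELAXED);

   and if the updated value is needed (a capture like v = x += n;),
   the ADD_FETCH variant is used and its result is assigned to v.  */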
7868 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7870 oldval = *addr;
7871 repeat:
7872 newval = rhs; // with oldval replacing *addr in rhs
7873 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
7874 if (oldval != newval)
7875 goto repeat;
7877 INDEX is log2 of the size of the data type, and thus usable to find the
7878 index of the builtin decl. */
7880 static bool
7881 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7882 tree addr, tree loaded_val, tree stored_val,
7883 int index)
7885 tree loadedi, storedi, initial, new_storedi, old_vali;
7886 tree type, itype, cmpxchg, iaddr;
7887 gimple_stmt_iterator si;
7888 basic_block loop_header = single_succ (load_bb);
7889 gimple phi, stmt;
7890 edge e;
7891 enum built_in_function fncode;
7893 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7894 order to use the RELAXED memory model effectively. */
7895 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7896 + index + 1);
7897 cmpxchg = builtin_decl_explicit (fncode);
7898 if (cmpxchg == NULL_TREE)
7899 return false;
7900 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7901 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7903 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7904 return false;
7906 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7907 si = gsi_last_bb (load_bb);
7908 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7910 /* For floating-point values, we'll need to view-convert them to integers
7911 so that we can perform the atomic compare and swap. Simplify the
7912 following code by always setting up the "i"ntegral variables. */
7913 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7915 tree iaddr_val;
7917 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7918 true), NULL);
7919 iaddr_val
7920 = force_gimple_operand_gsi (&si,
7921 fold_convert (TREE_TYPE (iaddr), addr),
7922 false, NULL_TREE, true, GSI_SAME_STMT);
7923 stmt = gimple_build_assign (iaddr, iaddr_val);
7924 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7925 loadedi = create_tmp_var (itype, NULL);
7926 if (gimple_in_ssa_p (cfun))
7927 loadedi = make_ssa_name (loadedi, NULL);
7929 else
7931 iaddr = addr;
7932 loadedi = loaded_val;
7935 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7936 tree loaddecl = builtin_decl_explicit (fncode);
7937 if (loaddecl)
7938 initial
7939 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
7940 build_call_expr (loaddecl, 2, iaddr,
7941 build_int_cst (NULL_TREE,
7942 MEMMODEL_RELAXED)));
7943 else
7944 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
7945 build_int_cst (TREE_TYPE (iaddr), 0));
7947 initial
7948 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
7949 GSI_SAME_STMT);
7951 /* Move the value to the LOADEDI temporary. */
7952 if (gimple_in_ssa_p (cfun))
7954 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
7955 phi = create_phi_node (loadedi, loop_header);
7956 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
7957 initial);
7959 else
7960 gsi_insert_before (&si,
7961 gimple_build_assign (loadedi, initial),
7962 GSI_SAME_STMT);
7963 if (loadedi != loaded_val)
7965 gimple_stmt_iterator gsi2;
7966 tree x;
7968 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
7969 gsi2 = gsi_start_bb (loop_header);
7970 if (gimple_in_ssa_p (cfun))
7972 gimple stmt;
7973 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7974 true, GSI_SAME_STMT);
7975 stmt = gimple_build_assign (loaded_val, x);
7976 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
7978 else
7980 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
7981 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7982 true, GSI_SAME_STMT);
7985 gsi_remove (&si, true);
7987 si = gsi_last_bb (store_bb);
7988 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7990 if (iaddr == addr)
7991 storedi = stored_val;
7992 else
7993 storedi =
7994 force_gimple_operand_gsi (&si,
7995 build1 (VIEW_CONVERT_EXPR, itype,
7996 stored_val), true, NULL_TREE, true,
7997 GSI_SAME_STMT);
7999 /* Build the compare&swap statement. */
8000 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
8001 new_storedi = force_gimple_operand_gsi (&si,
8002 fold_convert (TREE_TYPE (loadedi),
8003 new_storedi),
8004 true, NULL_TREE,
8005 true, GSI_SAME_STMT);
8007 if (gimple_in_ssa_p (cfun))
8008 old_vali = loadedi;
8009 else
8011 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
8012 stmt = gimple_build_assign (old_vali, loadedi);
8013 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8015 stmt = gimple_build_assign (loadedi, new_storedi);
8016 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8019 /* Note that we always perform the comparison as an integer, even for
8020 floating point. This allows the atomic operation to properly
8021 succeed even with NaNs and -0.0. */
8022 stmt = gimple_build_cond_empty
8023 (build2 (NE_EXPR, boolean_type_node,
8024 new_storedi, old_vali));
8025 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8027 /* Update cfg. */
8028 e = single_succ_edge (store_bb);
8029 e->flags &= ~EDGE_FALLTHRU;
8030 e->flags |= EDGE_FALSE_VALUE;
8032 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
8034 /* Copy the new value to loadedi (we already did that before the condition
8035 if we are not in SSA). */
8036 if (gimple_in_ssa_p (cfun))
8038 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
8039 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
8042 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8043 gsi_remove (&si, true);
8045 struct loop *loop = alloc_loop ();
8046 loop->header = loop_header;
8047 loop->latch = store_bb;
8048 add_loop (loop, loop_header->loop_father);
8050 if (gimple_in_ssa_p (cfun))
8051 update_ssa (TODO_update_ssa_no_phi);
8053 return true;
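/* Illustration added in editing -- a sketch, not from the original
   sources: for "#pragma omp atomic  x += 1.0f;" on a float X with no
   float fetch-add, the loop built above corresponds to C along these
   lines (the memcpy calls stand in for VIEW_CONVERT_EXPR):

     unsigned int *ip = (unsigned int *) &x;
     unsigned int oldi = *ip, newi, prev;
     float oldf, newf;
     for (;;)
       {
         __builtin_memcpy (&oldf, &oldi, sizeof oldf);
         newf = oldf + 1.0f;
         __builtin_memcpy (&newi, &newf, sizeof newi);
         prev = __sync_val_compare_and_swap (ip, oldi, newi);
         if (prev == oldi)
           break;
         oldi = prev;
       }

   Comparing PREV and OLDI as integers is what makes the loop
   terminate correctly for NaNs and -0.0.  */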
8056 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8058 GOMP_atomic_start ();
8059 *addr = rhs;
8060 GOMP_atomic_end ();
8062 The result is not globally atomic, but works so long as all parallel
8063 references are within #pragma omp atomic directives. According to
8064 responses received from omp@openmp.org, this appears to be within
8065 spec, which makes sense, since that's how several other compilers
8066 handle this situation as well.
8067 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8068 expanding. STORED_VAL is the operand of the matching
8069 GIMPLE_OMP_ATOMIC_STORE.
8071 We replace
8072 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8073 loaded_val = *addr;
8075 and replace
8076 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8077 *addr = stored_val; */
8080 static bool
8081 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
8082 tree addr, tree loaded_val, tree stored_val)
8084 gimple_stmt_iterator si;
8085 gimple stmt;
8086 tree t;
8088 si = gsi_last_bb (load_bb);
8089 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8091 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
8092 t = build_call_expr (t, 0);
8093 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8095 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
8096 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8097 gsi_remove (&si, true);
8099 si = gsi_last_bb (store_bb);
8100 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8102 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
8103 stored_val);
8104 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8106 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
8107 t = build_call_expr (t, 0);
8108 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8109 gsi_remove (&si, true);
8111 if (gimple_in_ssa_p (cfun))
8112 update_ssa (TODO_update_ssa_no_phi);
8113 return true;
8116 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand it
8117 using expand_omp_atomic_fetch_op. If that fails, we try to
8118 call expand_omp_atomic_pipeline, and if that fails too, the
8119 ultimate fallback is wrapping the operation in a mutex
8120 (expand_omp_atomic_mutex). REGION is the atomic region built
8121 by build_omp_regions_1(). */
8123 static void
8124 expand_omp_atomic (struct omp_region *region)
8126 basic_block load_bb = region->entry, store_bb = region->exit;
8127 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
8128 tree loaded_val = gimple_omp_atomic_load_lhs (load);
8129 tree addr = gimple_omp_atomic_load_rhs (load);
8130 tree stored_val = gimple_omp_atomic_store_val (store);
8131 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8132 HOST_WIDE_INT index;
8134 /* Make sure the type is one of the supported sizes. */
8135 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
8136 index = exact_log2 (index);
8137 if (index >= 0 && index <= 4)
8139 unsigned int align = TYPE_ALIGN_UNIT (type);
8141 /* __sync builtins require strict data alignment. */
8142 if (exact_log2 (align) >= index)
8144 /* Atomic load. */
8145 if (loaded_val == stored_val
8146 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8147 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8148 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8149 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
8150 return;
8152 /* Atomic store. */
8153 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8154 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8155 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8156 && store_bb == single_succ (load_bb)
8157 && first_stmt (store_bb) == store
8158 && expand_omp_atomic_store (load_bb, addr, loaded_val,
8159 stored_val, index))
8160 return;
8162 /* When possible, use specialized atomic update functions. */
8163 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
8164 && store_bb == single_succ (load_bb)
8165 && expand_omp_atomic_fetch_op (load_bb, addr,
8166 loaded_val, stored_val, index))
8167 return;
8169 /* If we don't have specialized __sync builtins, try to implement
8170 it as a compare-and-swap loop. */
8171 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
8172 loaded_val, stored_val, index))
8173 return;
8177 /* The ultimate fallback is wrapping the operation in a mutex. */
8178 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
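/* Worked example added in editing: for a 4-byte int, TYPE_SIZE_UNIT
   is 4 and index = exact_log2 (4) = 2, which selects the _4 variants
   of the __atomic builtins tried above; a type larger than 16 bytes
   (index > 4) skips all of them and goes straight to the mutex
   fallback, as do under-aligned types.  */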
8182 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
8184 static void
8185 expand_omp_target (struct omp_region *region)
8187 basic_block entry_bb, exit_bb, new_bb;
8188 struct function *child_cfun = NULL;
8189 tree child_fn = NULL_TREE, block, t;
8190 gimple_stmt_iterator gsi;
8191 gimple entry_stmt, stmt;
8192 edge e;
8194 entry_stmt = last_stmt (region->entry);
8195 new_bb = region->entry;
8196 int kind = gimple_omp_target_kind (entry_stmt);
8197 if (kind == GF_OMP_TARGET_KIND_REGION)
8199 child_fn = gimple_omp_target_child_fn (entry_stmt);
8200 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
8203 entry_bb = region->entry;
8204 exit_bb = region->exit;
8206 if (kind == GF_OMP_TARGET_KIND_REGION)
8208 unsigned srcidx, dstidx, num;
8210 /* If the target region needs data sent from the parent
8211 function, then the very first statement (except possible
8212 tree profile counter updates) of the target body
8213 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8214 &.OMP_DATA_O is passed as an argument to the child function,
8215 we need to replace it with the argument as seen by the child
8216 function.
8218 In most cases, this will end up being the identity assignment
8219 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
8220 a function call that has been inlined, the original PARM_DECL
8221 .OMP_DATA_I may have been converted into a different local
8222 variable. In which case, we need to keep the assignment. */
8223 if (gimple_omp_target_data_arg (entry_stmt))
8225 basic_block entry_succ_bb = single_succ (entry_bb);
8226 gimple_stmt_iterator gsi;
8227 tree arg;
8228 gimple tgtcopy_stmt = NULL;
8229 tree sender
8230 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
8232 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
8234 gcc_assert (!gsi_end_p (gsi));
8235 stmt = gsi_stmt (gsi);
8236 if (gimple_code (stmt) != GIMPLE_ASSIGN)
8237 continue;
8239 if (gimple_num_ops (stmt) == 2)
8241 tree arg = gimple_assign_rhs1 (stmt);
8243 /* We're ignoring the subcode because we're
8244 effectively doing a STRIP_NOPS. */
8246 if (TREE_CODE (arg) == ADDR_EXPR
8247 && TREE_OPERAND (arg, 0) == sender)
8249 tgtcopy_stmt = stmt;
8250 break;
8255 gcc_assert (tgtcopy_stmt != NULL);
8256 arg = DECL_ARGUMENTS (child_fn);
8258 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
8259 gsi_remove (&gsi, true);
8262 /* Declare local variables needed in CHILD_CFUN. */
8263 block = DECL_INITIAL (child_fn);
8264 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
8265 /* The gimplifier could record temporaries in the target block
8266 rather than in the containing function's local_decls chain,
8267 which would mean cgraph missed finalizing them. Do it now. */
8268 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
8269 if (TREE_CODE (t) == VAR_DECL
8270 && TREE_STATIC (t)
8271 && !DECL_EXTERNAL (t))
8272 varpool_node::finalize_decl (t);
8273 DECL_SAVED_TREE (child_fn) = NULL;
8274 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8275 gimple_set_body (child_fn, NULL);
8276 TREE_USED (block) = 1;
8278 /* Reset DECL_CONTEXT on function arguments. */
8279 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
8280 DECL_CONTEXT (t) = child_fn;
8282 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
8283 so that it can be moved to the child function. */
8284 gsi = gsi_last_bb (entry_bb);
8285 stmt = gsi_stmt (gsi);
8286 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
8287 && gimple_omp_target_kind (stmt)
8288 == GF_OMP_TARGET_KIND_REGION);
8289 gsi_remove (&gsi, true);
8290 e = split_block (entry_bb, stmt);
8291 entry_bb = e->dest;
8292 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8294 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8295 if (exit_bb)
8297 gsi = gsi_last_bb (exit_bb);
8298 gcc_assert (!gsi_end_p (gsi)
8299 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
8300 stmt = gimple_build_return (NULL);
8301 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8302 gsi_remove (&gsi, true);
8305 /* Move the target region into CHILD_CFUN. */
8307 block = gimple_block (entry_stmt);
8309 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8310 if (exit_bb)
8311 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8312 /* When the OMP expansion process cannot guarantee an up-to-date
8313 loop tree, arrange for the child function to fix up loops. */
8314 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8315 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8317 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8318 num = vec_safe_length (child_cfun->local_decls);
8319 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8321 t = (*child_cfun->local_decls)[srcidx];
8322 if (DECL_CONTEXT (t) == cfun->decl)
8323 continue;
8324 if (srcidx != dstidx)
8325 (*child_cfun->local_decls)[dstidx] = t;
8326 dstidx++;
8328 if (dstidx != num)
8329 vec_safe_truncate (child_cfun->local_decls, dstidx);
8331 /* Inform the callgraph about the new function. */
8332 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8333 cgraph_node::add_new_function (child_fn, true);
8335 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8336 fixed in a following pass. */
8337 push_cfun (child_cfun);
8338 cgraph_edge::rebuild_edges ();
8340 /* Some EH regions might become dead, see PR34608. If
8341 pass_cleanup_cfg isn't the first pass to happen with the
8342 new child, these dead EH edges might cause problems.
8343 Clean them up now. */
8344 if (flag_exceptions)
8346 basic_block bb;
8347 bool changed = false;
8349 FOR_EACH_BB_FN (bb, cfun)
8350 changed |= gimple_purge_dead_eh_edges (bb);
8351 if (changed)
8352 cleanup_tree_cfg ();
8354 pop_cfun ();
8357 /* Emit a library call to launch the target region, or do data
8358 transfers. */
8359 tree t1, t2, t3, t4, device, cond, c, clauses;
8360 enum built_in_function start_ix;
8361 location_t clause_loc;
8363 clauses = gimple_omp_target_clauses (entry_stmt);
8365 if (kind == GF_OMP_TARGET_KIND_REGION)
8366 start_ix = BUILT_IN_GOMP_TARGET;
8367 else if (kind == GF_OMP_TARGET_KIND_DATA)
8368 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8369 else
8370 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8372 /* By default, the value of DEVICE is -1 (let runtime library choose)
8373 and there is no conditional. */
8374 cond = NULL_TREE;
8375 device = build_int_cst (integer_type_node, -1);
8377 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8378 if (c)
8379 cond = OMP_CLAUSE_IF_EXPR (c);
8381 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8382 if (c)
8384 device = OMP_CLAUSE_DEVICE_ID (c);
8385 clause_loc = OMP_CLAUSE_LOCATION (c);
8387 else
8388 clause_loc = gimple_location (entry_stmt);
8390 /* Ensure 'device' is of the correct type. */
8391 device = fold_convert_loc (clause_loc, integer_type_node, device);
8393 /* If we found the clause 'if (cond)', build
8394 (cond ? device : -2). */
8395 if (cond)
8397 cond = gimple_boolify (cond);
8399 basic_block cond_bb, then_bb, else_bb;
8400 edge e;
8401 tree tmp_var;
8403 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
8404 if (kind != GF_OMP_TARGET_KIND_REGION)
8406 gsi = gsi_last_bb (new_bb);
8407 gsi_prev (&gsi);
8408 e = split_block (new_bb, gsi_stmt (gsi));
8410 else
8411 e = split_block (new_bb, NULL);
8412 cond_bb = e->src;
8413 new_bb = e->dest;
8414 remove_edge (e);
8416 then_bb = create_empty_bb (cond_bb);
8417 else_bb = create_empty_bb (then_bb);
8418 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8419 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8421 stmt = gimple_build_cond_empty (cond);
8422 gsi = gsi_last_bb (cond_bb);
8423 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8425 gsi = gsi_start_bb (then_bb);
8426 stmt = gimple_build_assign (tmp_var, device);
8427 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8429 gsi = gsi_start_bb (else_bb);
8430 stmt = gimple_build_assign (tmp_var,
8431 build_int_cst (integer_type_node, -2));
8432 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8434 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8435 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8436 add_bb_to_loop (then_bb, cond_bb->loop_father);
8437 add_bb_to_loop (else_bb, cond_bb->loop_father);
8438 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8439 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8441 device = tmp_var;
8444 gsi = gsi_last_bb (new_bb);
8445 t = gimple_omp_target_data_arg (entry_stmt);
8446 if (t == NULL)
8448 t1 = size_zero_node;
8449 t2 = build_zero_cst (ptr_type_node);
8450 t3 = t2;
8451 t4 = t2;
8453 else
8455 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8456 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8457 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8458 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8459 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8462 gimple g;
8463 /* FIXME: This will be address of
8464 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8465 symbol, as soon as the linker plugin is able to create it for us. */
8466 tree openmp_target = build_zero_cst (ptr_type_node);
8467 if (kind == GF_OMP_TARGET_KIND_REGION)
8469 tree fnaddr = build_fold_addr_expr (child_fn);
8470 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8471 device, fnaddr, openmp_target, t1, t2, t3, t4);
8473 else
8474 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8475 device, openmp_target, t1, t2, t3, t4);
8476 gimple_set_location (g, gimple_location (entry_stmt));
8477 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8478 if (kind != GF_OMP_TARGET_KIND_REGION)
8480 g = gsi_stmt (gsi);
8481 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8482 gsi_remove (&gsi, true);
8484 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8486 gsi = gsi_last_bb (region->exit);
8487 g = gsi_stmt (gsi);
8488 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8489 gsi_remove (&gsi, true);
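/* Illustration added in editing -- a sketch; the runtime entry point
   is inferred from the builtins used above: a region such as

     #pragma omp target device (d) map (tofrom: a)
     a++;

   is outlined into a child function FN and launched roughly as

     GOMP_target (d, FN, __OPENMP_TARGET__,
                  mapnum, hostaddrs, sizes, kinds);

   where device -1 lets the runtime choose and -2 (an if clause that
   evaluated to false) forces host fallback.  "target data" and
   "target update" call GOMP_target_data and GOMP_target_update with
   the same trailing arguments but no child function.  */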
8494 /* Expand the parallel region tree rooted at REGION. Expansion
8495 proceeds in depth-first order. Innermost regions are expanded
8496 first. This way, parallel regions that require a new function to
8497 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8498 internal dependencies in their body. */
8500 static void
8501 expand_omp (struct omp_region *region)
8503 while (region)
8505 location_t saved_location;
8506 gimple inner_stmt = NULL;
8508 /* First, determine whether this is a combined parallel+workshare
8509 region. */
8510 if (region->type == GIMPLE_OMP_PARALLEL)
8511 determine_parallel_type (region);
8513 if (region->type == GIMPLE_OMP_FOR
8514 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8515 inner_stmt = last_stmt (region->inner->entry);
8517 if (region->inner)
8518 expand_omp (region->inner);
8520 saved_location = input_location;
8521 if (gimple_has_location (last_stmt (region->entry)))
8522 input_location = gimple_location (last_stmt (region->entry));
8524 switch (region->type)
8526 case GIMPLE_OMP_PARALLEL:
8527 case GIMPLE_OMP_TASK:
8528 expand_omp_taskreg (region);
8529 break;
8531 case GIMPLE_OMP_FOR:
8532 expand_omp_for (region, inner_stmt);
8533 break;
8535 case GIMPLE_OMP_SECTIONS:
8536 expand_omp_sections (region);
8537 break;
8539 case GIMPLE_OMP_SECTION:
8540 /* Individual omp sections are handled together with their
8541 parent GIMPLE_OMP_SECTIONS region. */
8542 break;
8544 case GIMPLE_OMP_SINGLE:
8545 expand_omp_single (region);
8546 break;
8548 case GIMPLE_OMP_MASTER:
8549 case GIMPLE_OMP_TASKGROUP:
8550 case GIMPLE_OMP_ORDERED:
8551 case GIMPLE_OMP_CRITICAL:
8552 case GIMPLE_OMP_TEAMS:
8553 expand_omp_synch (region);
8554 break;
8556 case GIMPLE_OMP_ATOMIC_LOAD:
8557 expand_omp_atomic (region);
8558 break;
8560 case GIMPLE_OMP_TARGET:
8561 expand_omp_target (region);
8562 break;
8564 default:
8565 gcc_unreachable ();
8568 input_location = saved_location;
8569 region = region->next;
8574 /* Helper for build_omp_regions. Scan the dominator tree starting at
8575 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8576 true, the function ends once a single tree is built (otherwise, a
8577 whole forest of OMP constructs may be built). */
8579 static void
8580 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8581 bool single_tree)
8583 gimple_stmt_iterator gsi;
8584 gimple stmt;
8585 basic_block son;
8587 gsi = gsi_last_bb (bb);
8588 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8590 struct omp_region *region;
8591 enum gimple_code code;
8593 stmt = gsi_stmt (gsi);
8594 code = gimple_code (stmt);
8595 if (code == GIMPLE_OMP_RETURN)
8597 /* STMT is the return point out of region PARENT. Mark it
8598 as the exit point and make PARENT the immediately
8599 enclosing region. */
8600 gcc_assert (parent);
8601 region = parent;
8602 region->exit = bb;
8603 parent = parent->outer;
8605 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8607 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8608 GIMPLE_OMP_RETURN, but matches with
8609 GIMPLE_OMP_ATOMIC_LOAD. */
8610 gcc_assert (parent);
8611 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8612 region = parent;
8613 region->exit = bb;
8614 parent = parent->outer;
8617 else if (code == GIMPLE_OMP_CONTINUE)
8619 gcc_assert (parent);
8620 parent->cont = bb;
8622 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8624 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8625 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8628 else if (code == GIMPLE_OMP_TARGET
8629 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8630 new_omp_region (bb, code, parent);
8631 else
8633 /* Otherwise, this directive becomes the parent for a new
8634 region. */
8635 region = new_omp_region (bb, code, parent);
8636 parent = region;
8640 if (single_tree && !parent)
8641 return;
8643 for (son = first_dom_son (CDI_DOMINATORS, bb);
8644 son;
8645 son = next_dom_son (CDI_DOMINATORS, son))
8646 build_omp_regions_1 (son, parent, single_tree);
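/* Illustration added in editing -- a sketch, not from the original
   sources: for

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         body (i);
     }

   the dominator walk above builds a two-level tree: a
   GIMPLE_OMP_PARALLEL region whose INNER child is the GIMPLE_OMP_FOR
   region, each with ENTRY at the block holding the directive and EXIT
   at the block holding the matching GIMPLE_OMP_RETURN.  */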
8649 /* Builds the tree of OMP regions rooted at ROOT, storing it in
8650 root_omp_region. */
8652 static void
8653 build_omp_regions_root (basic_block root)
8655 gcc_assert (root_omp_region == NULL);
8656 build_omp_regions_1 (root, NULL, true);
8657 gcc_assert (root_omp_region != NULL);
8660 /* Expands omp construct (and its subconstructs) starting in HEAD. */
8662 void
8663 omp_expand_local (basic_block head)
8665 build_omp_regions_root (head);
8666 if (dump_file && (dump_flags & TDF_DETAILS))
8668 fprintf (dump_file, "\nOMP region tree\n\n");
8669 dump_omp_region (dump_file, root_omp_region, 0);
8670 fprintf (dump_file, "\n");
8673 remove_exit_barriers (root_omp_region);
8674 expand_omp (root_omp_region);
8676 free_omp_regions ();
8679 /* Scan the CFG and build a tree of OMP regions, storing its root
8680 in root_omp_region. */
8682 static void
8683 build_omp_regions (void)
8685 gcc_assert (root_omp_region == NULL);
8686 calculate_dominance_info (CDI_DOMINATORS);
8687 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8690 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8692 static unsigned int
8693 execute_expand_omp (void)
8695 build_omp_regions ();
8697 if (!root_omp_region)
8698 return 0;
8700 if (dump_file)
8702 fprintf (dump_file, "\nOMP region tree\n\n");
8703 dump_omp_region (dump_file, root_omp_region, 0);
8704 fprintf (dump_file, "\n");
8707 remove_exit_barriers (root_omp_region);
8709 expand_omp (root_omp_region);
8711 cleanup_tree_cfg ();
8713 free_omp_regions ();
8715 return 0;
8718 /* OMP expansion -- the default pass, run before creation of SSA form. */
8720 namespace {
8722 const pass_data pass_data_expand_omp =
8724 GIMPLE_PASS, /* type */
8725 "ompexp", /* name */
8726 OPTGROUP_NONE, /* optinfo_flags */
8727 TV_NONE, /* tv_id */
8728 PROP_gimple_any, /* properties_required */
8729 0, /* properties_provided */
8730 0, /* properties_destroyed */
8731 0, /* todo_flags_start */
8732 0, /* todo_flags_finish */
8735 class pass_expand_omp : public gimple_opt_pass
8737 public:
8738 pass_expand_omp (gcc::context *ctxt)
8739 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8742 /* opt_pass methods: */
8743 virtual bool gate (function *)
8745 return ((flag_openmp != 0 || flag_openmp_simd != 0
8746 || flag_cilkplus != 0) && !seen_error ());
8749 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8751 }; // class pass_expand_omp
8753 } // anon namespace
8755 gimple_opt_pass *
8756 make_pass_expand_omp (gcc::context *ctxt)
8758 return new pass_expand_omp (ctxt);
8761 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8763 /* If ctx is a worksharing context inside of a cancellable parallel
8764 region and it isn't nowait, add a lhs to its GIMPLE_OMP_RETURN
8765 and a conditional branch to the parallel's cancel_label to handle
8766 cancellation in the implicit barrier. */
8768 static void
8769 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8771 gimple omp_return = gimple_seq_last_stmt (*body);
8772 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8773 if (gimple_omp_return_nowait_p (omp_return))
8774 return;
8775 if (ctx->outer
8776 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8777 && ctx->outer->cancellable)
8779 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8780 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8781 tree lhs = create_tmp_var (c_bool_type, NULL);
8782 gimple_omp_return_set_lhs (omp_return, lhs);
8783 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8784 gimple g = gimple_build_cond (NE_EXPR, lhs,
8785 fold_convert (c_bool_type,
8786 boolean_false_node),
8787 ctx->outer->cancel_label, fallthru_label);
8788 gimple_seq_add_stmt (body, g);
8789 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
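/* Illustration added in editing -- a sketch, not from the original
   sources: for a cancellable sections region the GIMPLE_OMP_RETURN
   gains a lhs here and is later expanded (see expand_omp_sections
   above) into roughly

     cancelled = GOMP_sections_end_cancel ();
     if (cancelled != 0) goto cancel_label;
   fallthru_label:;

   so that a cancellation observed at the implicit barrier branches to
   the enclosing parallel's cancel_label.  */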
8793 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8794 CTX is the enclosing OMP context for the current statement. */
8796 static void
8797 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8799 tree block, control;
8800 gimple_stmt_iterator tgsi;
8801 gimple stmt, new_stmt, bind, t;
8802 gimple_seq ilist, dlist, olist, new_body;
8804 stmt = gsi_stmt (*gsi_p);
8806 push_gimplify_context ();
8808 dlist = NULL;
8809 ilist = NULL;
8810 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8811 &ilist, &dlist, ctx, NULL);
8813 new_body = gimple_omp_body (stmt);
8814 gimple_omp_set_body (stmt, NULL);
8815 tgsi = gsi_start (new_body);
8816 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8818 omp_context *sctx;
8819 gimple sec_start;
8821 sec_start = gsi_stmt (tgsi);
8822 sctx = maybe_lookup_ctx (sec_start);
8823 gcc_assert (sctx);
8825 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8826 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8827 GSI_CONTINUE_LINKING);
8828 gimple_omp_set_body (sec_start, NULL);
8830 if (gsi_one_before_end_p (tgsi))
8832 gimple_seq l = NULL;
8833 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8834 &l, ctx);
8835 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8836 gimple_omp_section_set_last (sec_start);
8839 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8840 GSI_CONTINUE_LINKING);
8843 block = make_node (BLOCK);
8844 bind = gimple_build_bind (NULL, new_body, block);
8846 olist = NULL;
8847 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8849 block = make_node (BLOCK);
8850 new_stmt = gimple_build_bind (NULL, NULL, block);
8851 gsi_replace (gsi_p, new_stmt, true);
8853 pop_gimplify_context (new_stmt);
8854 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8855 BLOCK_VARS (block) = gimple_bind_vars (bind);
8856 if (BLOCK_VARS (block))
8857 TREE_USED (block) = 1;
8859 new_body = NULL;
8860 gimple_seq_add_seq (&new_body, ilist);
8861 gimple_seq_add_stmt (&new_body, stmt);
8862 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8863 gimple_seq_add_stmt (&new_body, bind);
8865 control = create_tmp_var (unsigned_type_node, ".section");
8866 t = gimple_build_omp_continue (control, control);
8867 gimple_omp_sections_set_control (stmt, control);
8868 gimple_seq_add_stmt (&new_body, t);
8870 gimple_seq_add_seq (&new_body, olist);
8871 if (ctx->cancellable)
8872 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8873 gimple_seq_add_seq (&new_body, dlist);
8875 new_body = maybe_catch_exception (new_body);
8877 t = gimple_build_omp_return
8878 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8879 OMP_CLAUSE_NOWAIT));
8880 gimple_seq_add_stmt (&new_body, t);
8881 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8883 gimple_bind_set_body (new_stmt, new_body);
8887 /* A subroutine of lower_omp_single. Expand the simple form of
8888 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8890 if (GOMP_single_start ())
8891 BODY;
8892 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8894 FIXME. It may be better to delay expanding the logic of this until
8895 pass_expand_omp. The expanded logic may make the job more difficult
8896 for a synchronization analysis pass. */
8898 static void
8899 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
8901 location_t loc = gimple_location (single_stmt);
8902 tree tlabel = create_artificial_label (loc);
8903 tree flabel = create_artificial_label (loc);
8904 gimple call, cond;
8905 tree lhs, decl;
8907 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8908 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8909 call = gimple_build_call (decl, 0);
8910 gimple_call_set_lhs (call, lhs);
8911 gimple_seq_add_stmt (pre_p, call);
8913 cond = gimple_build_cond (EQ_EXPR, lhs,
8914 fold_convert_loc (loc, TREE_TYPE (lhs),
8915 boolean_true_node),
8916 tlabel, flabel);
8917 gimple_seq_add_stmt (pre_p, cond);
8918 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
8919 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8920 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
8924 /* A subroutine of lower_omp_single. Expand the form of
8925 a GIMPLE_OMP_SINGLE that has a copyprivate clause:
8927 #pragma omp single copyprivate (a, b, c)
8929 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
8932 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
8934 BODY;
8935 copyout.a = a;
8936 copyout.b = b;
8937 copyout.c = c;
8938 GOMP_single_copy_end (&copyout);
8940 else
8942 a = copyout_p->a;
8943 b = copyout_p->b;
8944 c = copyout_p->c;
8946 GOMP_barrier ();
8949 FIXME. It may be better to delay expanding the logic of this until
8950 pass_expand_omp. The expanded logic may make the job more difficult
8951 for a synchronization analysis pass. */
8953 static void
8954 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
8956 tree ptr_type, t, l0, l1, l2, bfn_decl;
8957 gimple_seq copyin_seq;
8958 location_t loc = gimple_location (single_stmt);
8960 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
8962 ptr_type = build_pointer_type (ctx->record_type);
8963 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
8965 l0 = create_artificial_label (loc);
8966 l1 = create_artificial_label (loc);
8967 l2 = create_artificial_label (loc);
8969 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
8970 t = build_call_expr_loc (loc, bfn_decl, 0);
8971 t = fold_convert_loc (loc, ptr_type, t);
8972 gimplify_assign (ctx->receiver_decl, t, pre_p);
8974 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
8975 build_int_cst (ptr_type, 0));
8976 t = build3 (COND_EXPR, void_type_node, t,
8977 build_and_jump (&l0), build_and_jump (&l1));
8978 gimplify_and_add (t, pre_p);
8980 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
8982 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8984 copyin_seq = NULL;
8985 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
8986 &copyin_seq, ctx);
8988 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
8989 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
8990 t = build_call_expr_loc (loc, bfn_decl, 1, t);
8991 gimplify_and_add (t, pre_p);
8993 t = build_and_jump (&l2);
8994 gimplify_and_add (t, pre_p);
8996 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
8998 gimple_seq_add_seq (pre_p, copyin_seq);
9000 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
9004 /* Lower code for an OpenMP single directive. */
9006 static void
9007 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9009 tree block;
9010 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
9011 gimple_seq bind_body, bind_body_tail = NULL, dlist;
9013 push_gimplify_context ();
9015 block = make_node (BLOCK);
9016 bind = gimple_build_bind (NULL, NULL, block);
9017 gsi_replace (gsi_p, bind, true);
9018 bind_body = NULL;
9019 dlist = NULL;
9020 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
9021 &bind_body, &dlist, ctx, NULL);
9022 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
9024 gimple_seq_add_stmt (&bind_body, single_stmt);
9026 if (ctx->record_type)
9027 lower_omp_single_copy (single_stmt, &bind_body, ctx);
9028 else
9029 lower_omp_single_simple (single_stmt, &bind_body);
9031 gimple_omp_set_body (single_stmt, NULL);
9033 gimple_seq_add_seq (&bind_body, dlist);
9035 bind_body = maybe_catch_exception (bind_body);
9037 t = gimple_build_omp_return
9038 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
9039 OMP_CLAUSE_NOWAIT));
9040 gimple_seq_add_stmt (&bind_body_tail, t);
9041 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
9042 if (ctx->record_type)
9044 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
9045 tree clobber = build_constructor (ctx->record_type, NULL);
9046 TREE_THIS_VOLATILE (clobber) = 1;
9047 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
9048 clobber), GSI_SAME_STMT);
9050 gimple_seq_add_seq (&bind_body, bind_body_tail);
9051 gimple_bind_set_body (bind, bind_body);
9053 pop_gimplify_context (bind);
9055 gimple_bind_append_vars (bind, ctx->block_vars);
9056 BLOCK_VARS (block) = ctx->block_vars;
9057 if (BLOCK_VARS (block))
9058 TREE_USED (block) = 1;
9062 /* Lower code for an OpenMP master directive. */
9064 static void
9065 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9067 tree block, lab = NULL, x, bfn_decl;
9068 gimple stmt = gsi_stmt (*gsi_p), bind;
9069 location_t loc = gimple_location (stmt);
9070 gimple_seq tseq;
9072 push_gimplify_context ();
9074 block = make_node (BLOCK);
9075 bind = gimple_build_bind (NULL, NULL, block);
9076 gsi_replace (gsi_p, bind, true);
9077 gimple_bind_add_stmt (bind, stmt);
9079 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9080 x = build_call_expr_loc (loc, bfn_decl, 0);
9081 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
9082 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
9083 tseq = NULL;
9084 gimplify_and_add (x, &tseq);
9085 gimple_bind_add_seq (bind, tseq);
9087 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9088 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9089 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9090 gimple_omp_set_body (stmt, NULL);
9092 gimple_bind_add_stmt (bind, gimple_build_label (lab));
9094 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9096 pop_gimplify_context (bind);
9098 gimple_bind_append_vars (bind, ctx->block_vars);
9099 BLOCK_VARS (block) = ctx->block_vars;
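/* Illustration added in editing -- a sketch, not from the original
   sources: the lowering above turns

     #pragma omp master
     body ();

   into roughly

     if (omp_get_thread_num () != 0)
       goto lab;
     body ();
   lab:;

   i.e. only the team's thread 0 executes the body, with no implied
   barrier.  */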
9103 /* Lower code for an OpenMP taskgroup directive. */
9105 static void
9106 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9108 gimple stmt = gsi_stmt (*gsi_p), bind, x;
9109 tree block = make_node (BLOCK);
9111 bind = gimple_build_bind (NULL, NULL, block);
9112 gsi_replace (gsi_p, bind, true);
9113 gimple_bind_add_stmt (bind, stmt);
9115 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
9116 0);
9117 gimple_bind_add_stmt (bind, x);
9119 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9120 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9121 gimple_omp_set_body (stmt, NULL);
9123 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9125 gimple_bind_append_vars (bind, ctx->block_vars);
9126 BLOCK_VARS (block) = ctx->block_vars;
9130 /* Lower code for an OpenMP ordered directive. */
9132 static void
9133 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9135 tree block;
9136 gimple stmt = gsi_stmt (*gsi_p), bind, x;
9138 push_gimplify_context ();
9140 block = make_node (BLOCK);
9141 bind = gimple_build_bind (NULL, NULL, block);
9142 gsi_replace (gsi_p, bind, true);
9143 gimple_bind_add_stmt (bind, stmt);
9145 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
9146 0);
9147 gimple_bind_add_stmt (bind, x);
9149 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9150 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9151 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9152 gimple_omp_set_body (stmt, NULL);
9154 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
9155 gimple_bind_add_stmt (bind, x);
9157 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9159 pop_gimplify_context (bind);
9161 gimple_bind_append_vars (bind, ctx->block_vars);
9162 BLOCK_VARS (block) = gimple_bind_vars (bind);
9166 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
9167 substitution of a couple of function calls. But the NAMED case
9168 requires that languages coordinate a symbol name. It is therefore
9169 best put here in common code. */
9171 static GTY((param1_is (tree), param2_is (tree)))
9172 splay_tree critical_name_mutexes;
9174 static void
9175 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9177 tree block;
9178 tree name, lock, unlock;
9179 gimple stmt = gsi_stmt (*gsi_p), bind;
9180 location_t loc = gimple_location (stmt);
9181 gimple_seq tbody;
9183 name = gimple_omp_critical_name (stmt);
9184 if (name)
9186 tree decl;
9187 splay_tree_node n;
9189 if (!critical_name_mutexes)
9190 critical_name_mutexes
9191 = splay_tree_new_ggc (splay_tree_compare_pointers,
9192 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9193 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9195 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
9196 if (n == NULL)
9198 char *new_str;
9200 decl = create_tmp_var_raw (ptr_type_node, NULL);
9202 new_str = ACONCAT ((".gomp_critical_user_",
9203 IDENTIFIER_POINTER (name), NULL));
9204 DECL_NAME (decl) = get_identifier (new_str);
9205 TREE_PUBLIC (decl) = 1;
9206 TREE_STATIC (decl) = 1;
9207 DECL_COMMON (decl) = 1;
9208 DECL_ARTIFICIAL (decl) = 1;
9209 DECL_IGNORED_P (decl) = 1;
9210 varpool_node::finalize_decl (decl);
9212 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
9213 (splay_tree_value) decl);
9215 else
9216 decl = (tree) n->value;
9218 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
9219 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
9221 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
9222 unlock = build_call_expr_loc (loc, unlock, 1,
9223 build_fold_addr_expr_loc (loc, decl));
9225 else
9227 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
9228 lock = build_call_expr_loc (loc, lock, 0);
9230 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
9231 unlock = build_call_expr_loc (loc, unlock, 0);
9234 push_gimplify_context ();
9236 block = make_node (BLOCK);
9237 bind = gimple_build_bind (NULL, NULL, block);
9238 gsi_replace (gsi_p, bind, true);
9239 gimple_bind_add_stmt (bind, stmt);
9241 tbody = gimple_bind_body (bind);
9242 gimplify_and_add (lock, &tbody);
9243 gimple_bind_set_body (bind, tbody);
9245 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9246 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9247 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9248 gimple_omp_set_body (stmt, NULL);
9250 tbody = gimple_bind_body (bind);
9251 gimplify_and_add (unlock, &tbody);
9252 gimple_bind_set_body (bind, tbody);
9254 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9256 pop_gimplify_context (bind);
9257 gimple_bind_append_vars (bind, ctx->block_vars);
9258 BLOCK_VARS (block) = gimple_bind_vars (bind);
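/* Illustration added in editing -- a sketch, not from the original
   sources: an unnamed critical is bracketed as

     GOMP_critical_start ();
     body ();
     GOMP_critical_end ();

   while "#pragma omp critical (foo)" uses a lazily created, common
   mutex symbol shared across translation units:

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     body ();
     GOMP_critical_name_end (&.gomp_critical_user_foo);  */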
9262 /* A subroutine of lower_omp_for. Generate code to emit the predicate
9263 for a lastprivate clause. Given a loop control predicate of (V
9264 cond N2), we gate the clause on (!(V cond N2)). The lowered form
9265 is appended to *DLIST, iterator initialization is appended to
9266 *BODY_P. */
9268 static void
9269 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
9270 gimple_seq *dlist, struct omp_context *ctx)
9272 tree clauses, cond, vinit;
9273 enum tree_code cond_code;
9274 gimple_seq stmts;
9276 cond_code = fd->loop.cond_code;
9277 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
9279 /* When possible, use a strict equality expression. This can let
9280 VRP-type optimizations deduce the value and remove a copy. */
9281 if (tree_fits_shwi_p (fd->loop.step))
9283 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
9284 if (step == 1 || step == -1)
9285 cond_code = EQ_EXPR;
9288 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
9290 clauses = gimple_omp_for_clauses (fd->for_stmt);
9291 stmts = NULL;
9292 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
9293 if (!gimple_seq_empty_p (stmts))
9295 gimple_seq_add_seq (&stmts, *dlist);
9296 *dlist = stmts;
9298 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
9299 vinit = fd->loop.n1;
9300 if (cond_code == EQ_EXPR
9301 && tree_fits_shwi_p (fd->loop.n2)
9302 && ! integer_zerop (fd->loop.n2))
9303 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
9304 else
9305 vinit = unshare_expr (vinit);
9307 /* Initialize the iterator variable, so that threads that don't execute
9308 any iterations don't execute the lastprivate clauses by accident. */
9309 gimplify_assign (fd->loop.v, vinit, body_p);
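/* Illustration added in editing -- a sketch, not from the original
   sources: for

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++)
       x = f (i);

   with step 1 the copy-out is gated on the strict test (i == n), so
   only the thread whose chunk ended at the final iteration publishes
   its private x; i is also pre-initialized (to zero here) so threads
   that receive no iterations can never satisfy the gate.  */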
9314 /* Lower code for an OpenMP loop directive. */
9316 static void
9317 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9319 tree *rhs_p, block;
9320 struct omp_for_data fd, *fdp = NULL;
9321 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
9322 gimple_seq omp_for_body, body, dlist;
9323 size_t i;
9325 push_gimplify_context ();
9327 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
9329 block = make_node (BLOCK);
9330 new_stmt = gimple_build_bind (NULL, NULL, block);
9331 /* Replace at gsi right away, so that 'stmt' is no member
9332 of a sequence anymore as we're going to add it to a different
9333 one below. */
9334 gsi_replace (gsi_p, new_stmt, true);
9336 /* Move declaration of temporaries in the loop body before we make
9337 it go away. */
9338 omp_for_body = gimple_omp_body (stmt);
9339 if (!gimple_seq_empty_p (omp_for_body)
9340 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
9342 gimple inner_bind = gimple_seq_first_stmt (omp_for_body);
9343 tree vars = gimple_bind_vars (inner_bind);
9344 gimple_bind_append_vars (new_stmt, vars);
9345 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block; don't
9346 keep them on the inner_bind and its block. */
9347 gimple_bind_set_vars (inner_bind, NULL_TREE);
9348 if (gimple_bind_block (inner_bind))
9349 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
9352 if (gimple_omp_for_combined_into_p (stmt))
9354 extract_omp_for_data (stmt, &fd, NULL);
9355 fdp = &fd;
9357 /* We need two temporaries with fd.loop.v type (istart/iend)
9358 and then (fd.collapse - 1) temporaries with the same
9359 type for count2 ... countN-1 vars if not constant. */
9360 size_t count = 2;
9361 tree type = fd.iter_type;
9362 if (fd.collapse > 1
9363 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
9364 count += fd.collapse - 1;
9365 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
9366 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
9367 tree clauses = *pc;
9368 if (parallel_for)
9369 outerc
9370 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
9371 OMP_CLAUSE__LOOPTEMP_);
9372 for (i = 0; i < count; i++)
9374 tree temp;
9375 if (parallel_for)
9377 gcc_assert (outerc);
9378 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
9379 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
9380 OMP_CLAUSE__LOOPTEMP_);
9382 else
9384 temp = create_tmp_var (type, NULL);
9385 insert_decl_map (&ctx->outer->cb, temp, temp);
9387 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
9388 OMP_CLAUSE_DECL (*pc) = temp;
9389 pc = &OMP_CLAUSE_CHAIN (*pc);
9391 *pc = clauses;
9394 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9395 dlist = NULL;
9396 body = NULL;
9397 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
9398 fdp);
9399 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
9401 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9403 /* Lower the header expressions. At this point, we can assume that
9404 the header is of the form:
9406 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
9408 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
9409 using the .omp_data_s mapping, if needed. */
9410 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
9412 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
9413 if (!is_gimple_min_invariant (*rhs_p))
9414 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9416 rhs_p = gimple_omp_for_final_ptr (stmt, i);
9417 if (!is_gimple_min_invariant (*rhs_p))
9418 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9420 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
9421 if (!is_gimple_min_invariant (*rhs_p))
9422 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9425 /* Once lowered, extract the bounds and clauses. */
9426 extract_omp_for_data (stmt, &fd, NULL);
9428 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
9430 gimple_seq_add_stmt (&body, stmt);
9431 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
9433 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
9434 fd.loop.v));
9436 /* After the loop, add exit clauses. */
9437 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
9439 if (ctx->cancellable)
9440 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
9442 gimple_seq_add_seq (&body, dlist);
9444 body = maybe_catch_exception (body);
9446 /* Region exit marker goes at the end of the loop body. */
9447 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
9448 maybe_add_implicit_barrier_cancel (ctx, &body);
9449 pop_gimplify_context (new_stmt);
9451 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9452 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
9453 if (BLOCK_VARS (block))
9454 TREE_USED (block) = 1;
9456 gimple_bind_set_body (new_stmt, body);
9457 gimple_omp_set_body (stmt, NULL);
9458 gimple_omp_for_set_pre_body (stmt, NULL);
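/* The sequence assembled above looks roughly like this (a sketch):

     <input clause initialization>     // lower_rec_input_clauses
     <pre-body statements>
     GIMPLE_OMP_FOR                    // the original statement
     <lowered loop body>
     GIMPLE_OMP_CONTINUE (V, V)
     <reduction merges>                // lower_reduction_clauses
     <lastprivate/destructor list>     // dlist
     GIMPLE_OMP_RETURN [nowait]
*/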
9461 /* Callback for walk_stmts. Check if the current statement only contains
9462 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
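/* For example, in

     #pragma omp parallel
       #pragma omp for
         ...

   the parallel body contains exactly one workshare, so *INFO ends up
   as 1 and the caller can mark the parallel as combined; any other
   statement mix drives *INFO to -1.  */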
9464 static tree
9465 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9466 bool *handled_ops_p,
9467 struct walk_stmt_info *wi)
9469 int *info = (int *) wi->info;
9470 gimple stmt = gsi_stmt (*gsi_p);
9472 *handled_ops_p = true;
9473 switch (gimple_code (stmt))
9475 WALK_SUBSTMTS;
9477 case GIMPLE_OMP_FOR:
9478 case GIMPLE_OMP_SECTIONS:
9479 *info = *info == 0 ? 1 : -1;
9480 break;
9481 default:
9482 *info = -1;
9483 break;
9485 return NULL;
9488 struct omp_taskcopy_context
9490 /* This field must be at the beginning, as we do "inheritance": some
9491 callback functions for tree-inline.c (e.g., task_copyfn_copy_decl)
9492 receive a copy_body_data pointer that is up-casted to an
9493 omp_taskcopy_context pointer.
9494 copy_body_data cb;
9495 omp_context *ctx;
9498 static tree
9499 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9501 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9503 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9504 return create_tmp_var (TREE_TYPE (var), NULL);
9506 return var;
9509 static tree
9510 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9512 tree name, new_fields = NULL, type, f;
9514 type = lang_hooks.types.make_type (RECORD_TYPE);
9515 name = DECL_NAME (TYPE_NAME (orig_type));
9516 name = build_decl (gimple_location (tcctx->ctx->stmt),
9517 TYPE_DECL, name, type);
9518 TYPE_NAME (type) = name;
9520 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9522 tree new_f = copy_node (f);
9523 DECL_CONTEXT (new_f) = type;
9524 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9525 TREE_CHAIN (new_f) = new_fields;
9526 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9527 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9528 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9529 &tcctx->cb, NULL);
9530 new_fields = new_f;
9531 tcctx->cb.decl_map->put (f, new_f);
9533 TYPE_FIELDS (type) = nreverse (new_fields);
9534 layout_type (type);
9535 return type;
9538 /* Create task copyfn. */
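/* Roughly (a sketch of the contract, cf. libgomp's GOMP_task): the
   runtime invokes the copyfn as cpyfn (dst, src) when a task's
   firstprivate data must be copied into freshly allocated storage.
   The function generated below therefore mirrors the sender record
   into the task-private record, copy-constructing firstprivate
   members and re-pointing VLA fields.  */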
9540 static void
9541 create_task_copyfn (gimple task_stmt, omp_context *ctx)
9543 struct function *child_cfun;
9544 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9545 tree record_type, srecord_type, bind, list;
9546 bool record_needs_remap = false, srecord_needs_remap = false;
9547 splay_tree_node n;
9548 struct omp_taskcopy_context tcctx;
9549 location_t loc = gimple_location (task_stmt);
9551 child_fn = gimple_omp_task_copy_fn (task_stmt);
9552 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9553 gcc_assert (child_cfun->cfg == NULL);
9554 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9556 /* Reset DECL_CONTEXT on function arguments. */
9557 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9558 DECL_CONTEXT (t) = child_fn;
9560 /* Populate the function. */
9561 push_gimplify_context ();
9562 push_cfun (child_cfun);
9564 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9565 TREE_SIDE_EFFECTS (bind) = 1;
9566 list = NULL;
9567 DECL_SAVED_TREE (child_fn) = bind;
9568 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9570 /* Remap src and dst argument types if needed. */
9571 record_type = ctx->record_type;
9572 srecord_type = ctx->srecord_type;
9573 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9574 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9576 record_needs_remap = true;
9577 break;
9579 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9580 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9582 srecord_needs_remap = true;
9583 break;
9586 if (record_needs_remap || srecord_needs_remap)
9588 memset (&tcctx, '\0', sizeof (tcctx));
9589 tcctx.cb.src_fn = ctx->cb.src_fn;
9590 tcctx.cb.dst_fn = child_fn;
9591 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
9592 gcc_checking_assert (tcctx.cb.src_node);
9593 tcctx.cb.dst_node = tcctx.cb.src_node;
9594 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9595 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9596 tcctx.cb.eh_lp_nr = 0;
9597 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9598 tcctx.cb.decl_map = new hash_map<tree, tree>;
9599 tcctx.ctx = ctx;
9601 if (record_needs_remap)
9602 record_type = task_copyfn_remap_type (&tcctx, record_type);
9603 if (srecord_needs_remap)
9604 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9606 else
9607 tcctx.cb.decl_map = NULL;
9609 arg = DECL_ARGUMENTS (child_fn);
9610 TREE_TYPE (arg) = build_pointer_type (record_type);
9611 sarg = DECL_CHAIN (arg);
9612 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9614 /* First pass: initialize temporaries used in record_type and srecord_type
9615 sizes and field offsets. */
9616 if (tcctx.cb.decl_map)
9617 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9618 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9620 tree *p;
9622 decl = OMP_CLAUSE_DECL (c);
9623 p = tcctx.cb.decl_map->get (decl);
9624 if (p == NULL)
9625 continue;
9626 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9627 sf = (tree) n->value;
9628 sf = *tcctx.cb.decl_map->get (sf);
9629 src = build_simple_mem_ref_loc (loc, sarg);
9630 src = omp_build_component_ref (src, sf);
9631 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9632 append_to_statement_list (t, &list);
9635 /* Second pass: copy shared var pointers and copy construct non-VLA
9636 firstprivate vars. */
9637 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9638 switch (OMP_CLAUSE_CODE (c))
9640 case OMP_CLAUSE_SHARED:
9641 decl = OMP_CLAUSE_DECL (c);
9642 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9643 if (n == NULL)
9644 break;
9645 f = (tree) n->value;
9646 if (tcctx.cb.decl_map)
9647 f = *tcctx.cb.decl_map->get (f);
9648 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9649 sf = (tree) n->value;
9650 if (tcctx.cb.decl_map)
9651 sf = *tcctx.cb.decl_map->get (sf);
9652 src = build_simple_mem_ref_loc (loc, sarg);
9653 src = omp_build_component_ref (src, sf);
9654 dst = build_simple_mem_ref_loc (loc, arg);
9655 dst = omp_build_component_ref (dst, f);
9656 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9657 append_to_statement_list (t, &list);
9658 break;
9659 case OMP_CLAUSE_FIRSTPRIVATE:
9660 decl = OMP_CLAUSE_DECL (c);
9661 if (is_variable_sized (decl))
9662 break;
9663 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9664 if (n == NULL)
9665 break;
9666 f = (tree) n->value;
9667 if (tcctx.cb.decl_map)
9668 f = *tcctx.cb.decl_map->get (f);
9669 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9670 if (n != NULL)
9672 sf = (tree) n->value;
9673 if (tcctx.cb.decl_map)
9674 sf = *tcctx.cb.decl_map->get (sf);
9675 src = build_simple_mem_ref_loc (loc, sarg);
9676 src = omp_build_component_ref (src, sf);
9677 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9678 src = build_simple_mem_ref_loc (loc, src);
9680 else
9681 src = decl;
9682 dst = build_simple_mem_ref_loc (loc, arg);
9683 dst = omp_build_component_ref (dst, f);
9684 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9685 append_to_statement_list (t, &list);
9686 break;
9687 case OMP_CLAUSE_PRIVATE:
9688 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9689 break;
9690 decl = OMP_CLAUSE_DECL (c);
9691 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9692 f = (tree) n->value;
9693 if (tcctx.cb.decl_map)
9694 f = *tcctx.cb.decl_map->get (f);
9695 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9696 if (n != NULL)
9698 sf = (tree) n->value;
9699 if (tcctx.cb.decl_map)
9700 sf = *tcctx.cb.decl_map->get (sf);
9701 src = build_simple_mem_ref_loc (loc, sarg);
9702 src = omp_build_component_ref (src, sf);
9703 if (use_pointer_for_field (decl, NULL))
9704 src = build_simple_mem_ref_loc (loc, src);
9706 else
9707 src = decl;
9708 dst = build_simple_mem_ref_loc (loc, arg);
9709 dst = omp_build_component_ref (dst, f);
9710 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9711 append_to_statement_list (t, &list);
9712 break;
9713 default:
9714 break;
9717 /* Last pass: handle VLA firstprivates. */
9718 if (tcctx.cb.decl_map)
9719 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9720 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9722 tree ind, ptr, df;
9724 decl = OMP_CLAUSE_DECL (c);
9725 if (!is_variable_sized (decl))
9726 continue;
9727 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9728 if (n == NULL)
9729 continue;
9730 f = (tree) n->value;
9731 f = *tcctx.cb.decl_map->get (f);
9732 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9733 ind = DECL_VALUE_EXPR (decl);
9734 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9735 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9736 n = splay_tree_lookup (ctx->sfield_map,
9737 (splay_tree_key) TREE_OPERAND (ind, 0));
9738 sf = (tree) n->value;
9739 sf = *tcctx.cb.decl_map->get (sf);
9740 src = build_simple_mem_ref_loc (loc, sarg);
9741 src = omp_build_component_ref (src, sf);
9742 src = build_simple_mem_ref_loc (loc, src);
9743 dst = build_simple_mem_ref_loc (loc, arg);
9744 dst = omp_build_component_ref (dst, f);
9745 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9746 append_to_statement_list (t, &list);
9747 n = splay_tree_lookup (ctx->field_map,
9748 (splay_tree_key) TREE_OPERAND (ind, 0));
9749 df = (tree) n->value;
9750 df = *tcctx.cb.decl_map->get (df);
9751 ptr = build_simple_mem_ref_loc (loc, arg);
9752 ptr = omp_build_component_ref (ptr, df);
9753 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9754 build_fold_addr_expr_loc (loc, dst));
9755 append_to_statement_list (t, &list);
9758 t = build1 (RETURN_EXPR, void_type_node, NULL);
9759 append_to_statement_list (t, &list);
9761 if (tcctx.cb.decl_map)
9762 delete tcctx.cb.decl_map;
9763 pop_gimplify_context (NULL);
9764 BIND_EXPR_BODY (bind) = list;
9765 pop_cfun ();
9768 static void
9769 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9771 tree c, clauses;
9772 gimple g;
9773 size_t n_in = 0, n_out = 0, idx = 2, i;
9775 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9776 OMP_CLAUSE_DEPEND);
9777 gcc_assert (clauses);
9778 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9779 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9780 switch (OMP_CLAUSE_DEPEND_KIND (c))
9782 case OMP_CLAUSE_DEPEND_IN:
9783 n_in++;
9784 break;
9785 case OMP_CLAUSE_DEPEND_OUT:
9786 case OMP_CLAUSE_DEPEND_INOUT:
9787 n_out++;
9788 break;
9789 default:
9790 gcc_unreachable ();
9792 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9793 tree array = create_tmp_var (type, NULL);
9794 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9795 NULL_TREE);
9796 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9797 gimple_seq_add_stmt (iseq, g);
9798 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9799 NULL_TREE);
9800 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9801 gimple_seq_add_stmt (iseq, g);
9802 for (i = 0; i < 2; i++)
9804 if ((i ? n_in : n_out) == 0)
9805 continue;
9806 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9807 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9808 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9810 tree t = OMP_CLAUSE_DECL (c);
9811 t = fold_convert (ptr_type_node, t);
9812 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9813 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9814 NULL_TREE, NULL_TREE);
9815 g = gimple_build_assign (r, t);
9816 gimple_seq_add_stmt (iseq, g);
9819 tree *p = gimple_omp_task_clauses_ptr (stmt);
9820 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9821 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9822 OMP_CLAUSE_CHAIN (c) = *p;
9823 *p = c;
9824 tree clobber = build_constructor (type, NULL);
9825 TREE_THIS_VOLATILE (clobber) = 1;
9826 g = gimple_build_assign (array, clobber);
9827 gimple_seq_add_stmt (oseq, g);
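/* Schematically, the array built above is laid out as:

     array[0] = n_in + n_out     // total number of depend addresses
     array[1] = n_out            // how many are out/inout
     array[2..]                  // out/inout addresses, then in addresses

   e.g. "depend(out: b) depend(in: a)" yields { 2, 1, &b, &a }.  The
   array's address is prepended to the task's clauses, and the array
   is clobbered in *OSEQ once the task construct is done with it.  */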
9830 /* Lower the OpenMP parallel or task directive in the current statement
9831 in GSI_P. CTX holds context information for the directive. */
9833 static void
9834 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9836 tree clauses;
9837 tree child_fn, t;
9838 gimple stmt = gsi_stmt (*gsi_p);
9839 gimple par_bind, bind, dep_bind = NULL;
9840 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9841 location_t loc = gimple_location (stmt);
9843 clauses = gimple_omp_taskreg_clauses (stmt);
9844 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9845 par_body = gimple_bind_body (par_bind);
9846 child_fn = ctx->cb.dst_fn;
9847 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9848 && !gimple_omp_parallel_combined_p (stmt))
9850 struct walk_stmt_info wi;
9851 int ws_num = 0;
9853 memset (&wi, 0, sizeof (wi));
9854 wi.info = &ws_num;
9855 wi.val_only = true;
9856 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9857 if (ws_num == 1)
9858 gimple_omp_parallel_set_combined_p (stmt, true);
9860 gimple_seq dep_ilist = NULL;
9861 gimple_seq dep_olist = NULL;
9862 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9863 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9865 push_gimplify_context ();
9866 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9867 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9870 if (ctx->srecord_type)
9871 create_task_copyfn (stmt, ctx);
9873 push_gimplify_context ();
9875 par_olist = NULL;
9876 par_ilist = NULL;
9877 par_rlist = NULL;
9878 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9879 lower_omp (&par_body, ctx);
9880 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9881 lower_reduction_clauses (clauses, &par_rlist, ctx);
9883 /* Declare all the variables created by mapping and the variables
9884 declared in the scope of the parallel body. */
9885 record_vars_into (ctx->block_vars, child_fn);
9886 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9888 if (ctx->record_type)
9890 ctx->sender_decl
9891 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9892 : ctx->record_type, ".omp_data_o");
9893 DECL_NAMELESS (ctx->sender_decl) = 1;
9894 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9895 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9898 olist = NULL;
9899 ilist = NULL;
9900 lower_send_clauses (clauses, &ilist, &olist, ctx);
9901 lower_send_shared_vars (&ilist, &olist, ctx);
9903 if (ctx->record_type)
9905 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
9906 TREE_THIS_VOLATILE (clobber) = 1;
9907 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9908 clobber));
9911 /* Once all the expansions are done, sequence all the different
9912 fragments inside gimple_omp_body. */
9914 new_body = NULL;
9916 if (ctx->record_type)
9918 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9919 /* fixup_child_record_type might have changed receiver_decl's type. */
9920 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9921 gimple_seq_add_stmt (&new_body,
9922 gimple_build_assign (ctx->receiver_decl, t));
9925 gimple_seq_add_seq (&new_body, par_ilist);
9926 gimple_seq_add_seq (&new_body, par_body);
9927 gimple_seq_add_seq (&new_body, par_rlist);
9928 if (ctx->cancellable)
9929 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
9930 gimple_seq_add_seq (&new_body, par_olist);
9931 new_body = maybe_catch_exception (new_body);
9932 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9933 gimple_omp_set_body (stmt, new_body);
9935 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
9936 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
9937 gimple_bind_add_seq (bind, ilist);
9938 gimple_bind_add_stmt (bind, stmt);
9939 gimple_bind_add_seq (bind, olist);
9941 pop_gimplify_context (NULL);
9943 if (dep_bind)
9945 gimple_bind_add_seq (dep_bind, dep_ilist);
9946 gimple_bind_add_stmt (dep_bind, bind);
9947 gimple_bind_add_seq (dep_bind, dep_olist);
9948 pop_gimplify_context (dep_bind);
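/* Schematically, the final statement structure is:

     dep_bind {                       // only with depend() clauses
       <depend array setup>           // dep_ilist
       bind {
         <send clause setup>          // ilist
         GIMPLE_OMP_PARALLEL/TASK     // body already lowered
         <send clause teardown>       // olist
       }
       <depend array clobber>         // dep_olist
     }
*/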
9952 /* Lower the OpenMP target directive in the current statement
9953 in GSI_P. CTX holds context information for the directive. */
9955 static void
9956 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9958 tree clauses;
9959 tree child_fn, t, c;
9960 gimple stmt = gsi_stmt (*gsi_p);
9961 gimple tgt_bind = NULL, bind;
9962 gimple_seq tgt_body = NULL, olist, ilist, new_body;
9963 location_t loc = gimple_location (stmt);
9964 int kind = gimple_omp_target_kind (stmt);
9965 unsigned int map_cnt = 0;
9967 clauses = gimple_omp_target_clauses (stmt);
9968 if (kind == GF_OMP_TARGET_KIND_REGION)
9970 tgt_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9971 tgt_body = gimple_bind_body (tgt_bind);
9973 else if (kind == GF_OMP_TARGET_KIND_DATA)
9974 tgt_body = gimple_omp_body (stmt);
9975 child_fn = ctx->cb.dst_fn;
9977 push_gimplify_context ();
9979 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9980 switch (OMP_CLAUSE_CODE (c))
9982 tree var, x;
9984 default:
9985 break;
9986 case OMP_CLAUSE_MAP:
9987 case OMP_CLAUSE_TO:
9988 case OMP_CLAUSE_FROM:
9989 var = OMP_CLAUSE_DECL (c);
9990 if (!DECL_P (var))
9992 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
9993 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9994 map_cnt++;
9995 continue;
9998 if (DECL_SIZE (var)
9999 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
10001 tree var2 = DECL_VALUE_EXPR (var);
10002 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
10003 var2 = TREE_OPERAND (var2, 0);
10004 gcc_assert (DECL_P (var2));
10005 var = var2;
10008 if (!maybe_lookup_field (var, ctx))
10009 continue;
10011 if (kind == GF_OMP_TARGET_KIND_REGION)
10013 x = build_receiver_ref (var, true, ctx);
10014 tree new_var = lookup_decl (var, ctx);
10015 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10016 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10017 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10018 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
10019 x = build_simple_mem_ref (x);
10020 SET_DECL_VALUE_EXPR (new_var, x);
10021 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
10023 map_cnt++;
10026 if (kind == GF_OMP_TARGET_KIND_REGION)
10028 target_nesting_level++;
10029 lower_omp (&tgt_body, ctx);
10030 target_nesting_level--;
10032 else if (kind == GF_OMP_TARGET_KIND_DATA)
10033 lower_omp (&tgt_body, ctx);
10035 if (kind == GF_OMP_TARGET_KIND_REGION)
10037 /* Declare all the variables created by mapping and the variables
10038 declared in the scope of the target body. */
10039 record_vars_into (ctx->block_vars, child_fn);
10040 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
10043 olist = NULL;
10044 ilist = NULL;
10045 if (ctx->record_type)
10047 ctx->sender_decl
10048 = create_tmp_var (ctx->record_type, ".omp_data_arr");
10049 DECL_NAMELESS (ctx->sender_decl) = 1;
10050 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
10051 t = make_tree_vec (3);
10052 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
10053 TREE_VEC_ELT (t, 1)
10054 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
10055 ".omp_data_sizes");
10056 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
10057 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
10058 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
10059 TREE_VEC_ELT (t, 2)
10060 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
10061 map_cnt),
10062 ".omp_data_kinds");
10063 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
10064 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
10065 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
10066 gimple_omp_target_set_data_arg (stmt, t);
10068 vec<constructor_elt, va_gc> *vsize;
10069 vec<constructor_elt, va_gc> *vkind;
10070 vec_alloc (vsize, map_cnt);
10071 vec_alloc (vkind, map_cnt);
10072 unsigned int map_idx = 0;
10074 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10075 switch (OMP_CLAUSE_CODE (c))
10077 tree ovar, nc;
10079 default:
10080 break;
10081 case OMP_CLAUSE_MAP:
10082 case OMP_CLAUSE_TO:
10083 case OMP_CLAUSE_FROM:
10084 nc = c;
10085 ovar = OMP_CLAUSE_DECL (c);
10086 if (!DECL_P (ovar))
10088 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10089 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10091 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
10092 == get_base_address (ovar));
10093 nc = OMP_CLAUSE_CHAIN (c);
10094 ovar = OMP_CLAUSE_DECL (nc);
10096 else
10098 tree x = build_sender_ref (ovar, ctx);
10099 tree v
10100 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
10101 gimplify_assign (x, v, &ilist);
10102 nc = NULL_TREE;
10105 else
10107 if (DECL_SIZE (ovar)
10108 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
10110 tree ovar2 = DECL_VALUE_EXPR (ovar);
10111 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
10112 ovar2 = TREE_OPERAND (ovar2, 0);
10113 gcc_assert (DECL_P (ovar2));
10114 ovar = ovar2;
10116 if (!maybe_lookup_field (ovar, ctx))
10117 continue;
10120 if (nc)
10122 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
10123 tree x = build_sender_ref (ovar, ctx);
10124 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10125 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10126 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10127 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
10129 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10130 tree avar
10131 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
10132 mark_addressable (avar);
10133 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
10134 avar = build_fold_addr_expr (avar);
10135 gimplify_assign (x, avar, &ilist);
10137 else if (is_gimple_reg (var))
10139 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10140 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
10141 mark_addressable (avar);
10142 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
10143 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
10144 gimplify_assign (avar, var, &ilist);
10145 avar = build_fold_addr_expr (avar);
10146 gimplify_assign (x, avar, &ilist);
10147 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
10148 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
10149 && !TYPE_READONLY (TREE_TYPE (var)))
10151 x = build_sender_ref (ovar, ctx);
10152 x = build_simple_mem_ref (x);
10153 gimplify_assign (var, x, &olist);
10156 else
10158 var = build_fold_addr_expr (var);
10159 gimplify_assign (x, var, &ilist);
10162 tree s = OMP_CLAUSE_SIZE (c);
10163 if (s == NULL_TREE)
10164 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
10165 s = fold_convert (size_type_node, s);
10166 tree purpose = size_int (map_idx++);
10167 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
10168 if (TREE_CODE (s) != INTEGER_CST)
10169 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
10171 unsigned char tkind = 0;
10172 switch (OMP_CLAUSE_CODE (c))
10174 case OMP_CLAUSE_MAP:
10175 tkind = OMP_CLAUSE_MAP_KIND (c);
10176 break;
10177 case OMP_CLAUSE_TO:
10178 tkind = OMP_CLAUSE_MAP_TO;
10179 break;
10180 case OMP_CLAUSE_FROM:
10181 tkind = OMP_CLAUSE_MAP_FROM;
10182 break;
10183 default:
10184 gcc_unreachable ();
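	  /* Each byte of .omp_data_kinds packs the mapping kind into
	     its low 3 bits and ceil_log2 of the alignment into the
	     remaining bits, as computed below.  */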
10186 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
10187 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
10188 talign = DECL_ALIGN_UNIT (ovar);
10189 talign = ceil_log2 (talign);
10190 tkind |= talign << 3;
10191 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
10192 build_int_cst (unsigned_char_type_node,
10193 tkind));
10194 if (nc && nc != c)
10195 c = nc;
10198 gcc_assert (map_idx == map_cnt);
10200 DECL_INITIAL (TREE_VEC_ELT (t, 1))
10201 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
10202 DECL_INITIAL (TREE_VEC_ELT (t, 2))
10203 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
10204 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
10206 gimple_seq initlist = NULL;
10207 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
10208 TREE_VEC_ELT (t, 1)),
10209 &initlist, true, NULL_TREE);
10210 gimple_seq_add_seq (&ilist, initlist);
10212 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
10213 NULL);
10214 TREE_THIS_VOLATILE (clobber) = 1;
10215 gimple_seq_add_stmt (&olist,
10216 gimple_build_assign (TREE_VEC_ELT (t, 1),
10217 clobber));
10220 tree clobber = build_constructor (ctx->record_type, NULL);
10221 TREE_THIS_VOLATILE (clobber) = 1;
10222 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
10223 clobber));
10226 /* Once all the expansions are done, sequence all the different
10227 fragments inside gimple_omp_body. */
10229 new_body = NULL;
10231 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
10233 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10234 /* fixup_child_record_type might have changed receiver_decl's type. */
10235 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
10236 gimple_seq_add_stmt (&new_body,
10237 gimple_build_assign (ctx->receiver_decl, t));
10240 if (kind == GF_OMP_TARGET_KIND_REGION)
10242 gimple_seq_add_seq (&new_body, tgt_body);
10243 new_body = maybe_catch_exception (new_body);
10245 else if (kind == GF_OMP_TARGET_KIND_DATA)
10246 new_body = tgt_body;
10247 if (kind != GF_OMP_TARGET_KIND_UPDATE)
10249 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10250 gimple_omp_set_body (stmt, new_body);
10253 bind = gimple_build_bind (NULL, NULL,
10254 tgt_bind ? gimple_bind_block (tgt_bind)
10255 : NULL_TREE);
10256 gsi_replace (gsi_p, bind, true);
10257 gimple_bind_add_seq (bind, ilist);
10258 gimple_bind_add_stmt (bind, stmt);
10259 gimple_bind_add_seq (bind, olist);
10261 pop_gimplify_context (NULL);
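/* In outline (a sketch): the triple built above -- .omp_data_arr
   (host addresses), .omp_data_sizes and .omp_data_kinds -- has one
   slot per mapped variable, which is why map_idx must end up equal
   to map_cnt; expansion later hands it to the target runtime call.  */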
10264 /* Lower code for an OpenMP teams directive. */
10266 static void
10267 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10269 gimple teams_stmt = gsi_stmt (*gsi_p);
10270 push_gimplify_context ();
10272 tree block = make_node (BLOCK);
10273 gimple bind = gimple_build_bind (NULL, NULL, block);
10274 gsi_replace (gsi_p, bind, true);
10275 gimple_seq bind_body = NULL;
10276 gimple_seq dlist = NULL;
10277 gimple_seq olist = NULL;
10279 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10280 OMP_CLAUSE_NUM_TEAMS);
10281 if (num_teams == NULL_TREE)
10282 num_teams = build_int_cst (unsigned_type_node, 0);
10283 else
10285 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
10286 num_teams = fold_convert (unsigned_type_node, num_teams);
10287 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
10289 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10290 OMP_CLAUSE_THREAD_LIMIT);
10291 if (thread_limit == NULL_TREE)
10292 thread_limit = build_int_cst (unsigned_type_node, 0);
10293 else
10295 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
10296 thread_limit = fold_convert (unsigned_type_node, thread_limit);
10297 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
10298 fb_rvalue);
10301 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
10302 &bind_body, &dlist, ctx, NULL);
10303 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
10304 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
10305 gimple_seq_add_stmt (&bind_body, teams_stmt);
10307 location_t loc = gimple_location (teams_stmt);
10308 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
10309 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
10310 gimple_set_location (call, loc);
10311 gimple_seq_add_stmt (&bind_body, call);
10313 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
10314 gimple_omp_set_body (teams_stmt, NULL);
10315 gimple_seq_add_seq (&bind_body, olist);
10316 gimple_seq_add_seq (&bind_body, dlist);
10317 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
10318 gimple_bind_set_body (bind, bind_body);
10320 pop_gimplify_context (bind);
10322 gimple_bind_append_vars (bind, ctx->block_vars);
10323 BLOCK_VARS (block) = ctx->block_vars;
10324 if (BLOCK_VARS (block))
10325 TREE_USED (block) = 1;
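/* Schematically, the lowered form is:

     num_teams = ...; thread_limit = ...;   // 0 if the clause is absent
     <input clause initialization>
     GIMPLE_OMP_TEAMS
     GOMP_teams (num_teams, thread_limit);
     <teams body>
     <reductions, destructor list>
     GIMPLE_OMP_RETURN
*/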
10329 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
10330 regimplified. If DATA is non-NULL, lower_omp_1 is being invoked
10331 outside of any OpenMP context, but with task_shared_vars set. */
10333 static tree
10334 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
10335 void *data)
10337 tree t = *tp;
10339 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
10340 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
10341 return t;
10343 if (task_shared_vars
10344 && DECL_P (t)
10345 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
10346 return t;
10348 /* If a global variable has been privatized, TREE_CONSTANT on
10349 ADDR_EXPR might be wrong. */
10350 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
10351 recompute_tree_invariant_for_addr_expr (t);
10353 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
10354 return NULL_TREE;
10357 static void
10358 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10360 gimple stmt = gsi_stmt (*gsi_p);
10361 struct walk_stmt_info wi;
10363 if (gimple_has_location (stmt))
10364 input_location = gimple_location (stmt);
10366 if (task_shared_vars)
10367 memset (&wi, '\0', sizeof (wi));
10369 /* If we have issued syntax errors, avoid doing any heavy lifting.
10370 Just replace the OpenMP directives with a NOP to avoid
10371 confusing RTL expansion. */
10372 if (seen_error () && is_gimple_omp (stmt))
10374 gsi_replace (gsi_p, gimple_build_nop (), true);
10375 return;
10378 switch (gimple_code (stmt))
10380 case GIMPLE_COND:
10381 if ((ctx || task_shared_vars)
10382 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
10383 ctx ? NULL : &wi, NULL)
10384 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
10385 ctx ? NULL : &wi, NULL)))
10386 gimple_regimplify_operands (stmt, gsi_p);
10387 break;
10388 case GIMPLE_CATCH:
10389 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
10390 break;
10391 case GIMPLE_EH_FILTER:
10392 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
10393 break;
10394 case GIMPLE_TRY:
10395 lower_omp (gimple_try_eval_ptr (stmt), ctx);
10396 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
10397 break;
10398 case GIMPLE_TRANSACTION:
10399 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
10400 break;
10401 case GIMPLE_BIND:
10402 lower_omp (gimple_bind_body_ptr (stmt), ctx);
10403 break;
10404 case GIMPLE_OMP_PARALLEL:
10405 case GIMPLE_OMP_TASK:
10406 ctx = maybe_lookup_ctx (stmt);
10407 gcc_assert (ctx);
10408 if (ctx->cancellable)
10409 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10410 lower_omp_taskreg (gsi_p, ctx);
10411 break;
10412 case GIMPLE_OMP_FOR:
10413 ctx = maybe_lookup_ctx (stmt);
10414 gcc_assert (ctx);
10415 if (ctx->cancellable)
10416 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10417 lower_omp_for (gsi_p, ctx);
10418 break;
10419 case GIMPLE_OMP_SECTIONS:
10420 ctx = maybe_lookup_ctx (stmt);
10421 gcc_assert (ctx);
10422 if (ctx->cancellable)
10423 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10424 lower_omp_sections (gsi_p, ctx);
10425 break;
10426 case GIMPLE_OMP_SINGLE:
10427 ctx = maybe_lookup_ctx (stmt);
10428 gcc_assert (ctx);
10429 lower_omp_single (gsi_p, ctx);
10430 break;
10431 case GIMPLE_OMP_MASTER:
10432 ctx = maybe_lookup_ctx (stmt);
10433 gcc_assert (ctx);
10434 lower_omp_master (gsi_p, ctx);
10435 break;
10436 case GIMPLE_OMP_TASKGROUP:
10437 ctx = maybe_lookup_ctx (stmt);
10438 gcc_assert (ctx);
10439 lower_omp_taskgroup (gsi_p, ctx);
10440 break;
10441 case GIMPLE_OMP_ORDERED:
10442 ctx = maybe_lookup_ctx (stmt);
10443 gcc_assert (ctx);
10444 lower_omp_ordered (gsi_p, ctx);
10445 break;
10446 case GIMPLE_OMP_CRITICAL:
10447 ctx = maybe_lookup_ctx (stmt);
10448 gcc_assert (ctx);
10449 lower_omp_critical (gsi_p, ctx);
10450 break;
10451 case GIMPLE_OMP_ATOMIC_LOAD:
10452 if ((ctx || task_shared_vars)
10453 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
10454 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
10455 gimple_regimplify_operands (stmt, gsi_p);
10456 break;
10457 case GIMPLE_OMP_TARGET:
10458 ctx = maybe_lookup_ctx (stmt);
10459 gcc_assert (ctx);
10460 lower_omp_target (gsi_p, ctx);
10461 break;
10462 case GIMPLE_OMP_TEAMS:
10463 ctx = maybe_lookup_ctx (stmt);
10464 gcc_assert (ctx);
10465 lower_omp_teams (gsi_p, ctx);
10466 break;
10467 case GIMPLE_CALL:
10468 tree fndecl;
10469 fndecl = gimple_call_fndecl (stmt);
10470 if (fndecl
10471 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10472 switch (DECL_FUNCTION_CODE (fndecl))
10474 case BUILT_IN_GOMP_BARRIER:
10475 if (ctx == NULL)
10476 break;
10477 /* FALLTHRU */
10478 case BUILT_IN_GOMP_CANCEL:
10479 case BUILT_IN_GOMP_CANCELLATION_POINT:
10480 omp_context *cctx;
10481 cctx = ctx;
10482 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10483 cctx = cctx->outer;
10484 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
10485 if (!cctx->cancellable)
10487 if (DECL_FUNCTION_CODE (fndecl)
10488 == BUILT_IN_GOMP_CANCELLATION_POINT)
10490 stmt = gimple_build_nop ();
10491 gsi_replace (gsi_p, stmt, false);
10493 break;
10495 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10497 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10498 gimple_call_set_fndecl (stmt, fndecl);
10499 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10501 tree lhs;
10502 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)), NULL);
10503 gimple_call_set_lhs (stmt, lhs);
10504 tree fallthru_label;
10505 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10506 gimple g;
10507 g = gimple_build_label (fallthru_label);
10508 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10509 g = gimple_build_cond (NE_EXPR, lhs,
10510 fold_convert (TREE_TYPE (lhs),
10511 boolean_false_node),
10512 cctx->cancel_label, fallthru_label);
10513 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10514 break;
10515 default:
10516 break;
10518 /* FALLTHRU */
10519 default:
10520 if ((ctx || task_shared_vars)
10521 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10522 ctx ? NULL : &wi))
10524 /* Just remove clobbers. This should happen only if we have
10525 "privatized" local addressable variables in SIMD regions;
10526 the clobber isn't needed in that case, and gimplifying the address
10527 of the ARRAY_REF into a pointer and creating a MEM_REF based
10528 clobber would create worse code than we get with the clobber
10529 dropped. */
10530 if (gimple_clobber_p (stmt))
10532 gsi_replace (gsi_p, gimple_build_nop (), true);
10533 break;
10535 gimple_regimplify_operands (stmt, gsi_p);
10537 break;
10541 static void
10542 lower_omp (gimple_seq *body, omp_context *ctx)
10544 location_t saved_location = input_location;
10545 gimple_stmt_iterator gsi;
10546 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10547 lower_omp_1 (&gsi, ctx);
10548 /* During gimplification, we have not always invoked fold_stmt
10549 (gimplify.c:maybe_fold_stmt); call it now. */
10550 if (target_nesting_level)
10551 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10552 fold_stmt (&gsi);
10553 input_location = saved_location;
10556 /* Main entry point. */
10558 static unsigned int
10559 execute_lower_omp (void)
10561 gimple_seq body;
10563 /* This pass always runs, to provide PROP_gimple_lomp. But there is
10564 nothing to do unless -fopenmp, -fopenmp-simd or -fcilkplus is given. */
10565 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10566 return 0;
10568 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10569 delete_omp_context);
10571 body = gimple_body (current_function_decl);
10572 scan_omp (&body, NULL);
10573 gcc_assert (taskreg_nesting_level == 0);
10575 if (all_contexts->root)
10577 if (task_shared_vars)
10578 push_gimplify_context ();
10579 lower_omp (&body, NULL);
10580 if (task_shared_vars)
10581 pop_gimplify_context (NULL);
10584 if (all_contexts)
10586 splay_tree_delete (all_contexts);
10587 all_contexts = NULL;
10589 BITMAP_FREE (task_shared_vars);
10590 return 0;
10593 namespace {
10595 const pass_data pass_data_lower_omp =
10597 GIMPLE_PASS, /* type */
10598 "omplower", /* name */
10599 OPTGROUP_NONE, /* optinfo_flags */
10600 TV_NONE, /* tv_id */
10601 PROP_gimple_any, /* properties_required */
10602 PROP_gimple_lomp, /* properties_provided */
10603 0, /* properties_destroyed */
10604 0, /* todo_flags_start */
10605 0, /* todo_flags_finish */
10608 class pass_lower_omp : public gimple_opt_pass
10610 public:
10611 pass_lower_omp (gcc::context *ctxt)
10612 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10615 /* opt_pass methods: */
10616 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10618 }; // class pass_lower_omp
10620 } // anon namespace
10622 gimple_opt_pass *
10623 make_pass_lower_omp (gcc::context *ctxt)
10625 return new pass_lower_omp (ctxt);
10628 /* The following is a utility to diagnose OpenMP structured block violations.
10629 It is not part of the "omplower" pass, as that's invoked too late. It
10630 should be invoked by the respective front ends after gimplification. */
10632 static splay_tree all_labels;
10634 /* Check for mismatched contexts and generate an error if needed. Return
10635 true if an error is detected. */
10637 static bool
10638 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10639 gimple branch_ctx, gimple label_ctx)
10641 if (label_ctx == branch_ctx)
10642 return false;
10646 /* Previously we kept track of the label's entire context in diagnose_sb_[12]
10647 so we could traverse it and issue a correct "exit" or "enter" error
10648 message upon a structured block violation.
10650 We built the context by building a list with tree_cons'ing, but there is
10651 no easy counterpart in gimple tuples. It seems like far too much work
10652 for issuing exit/enter error messages. If someone really misses the
10653 distinct error message... patches welcome. */
10656 #if 0
10657 /* Try to avoid confusing the user by producing an error message
10658 with correct "exit" or "enter" verbiage. We prefer "exit"
10659 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10660 if (branch_ctx == NULL)
10661 exit_p = false;
10662 else
10664 while (label_ctx)
10666 if (TREE_VALUE (label_ctx) == branch_ctx)
10668 exit_p = false;
10669 break;
10671 label_ctx = TREE_CHAIN (label_ctx);
10675 if (exit_p)
10676 error ("invalid exit from OpenMP structured block");
10677 else
10678 error ("invalid entry to OpenMP structured block");
10679 #endif
10681 bool cilkplus_block = false;
10682 if (flag_cilkplus)
10684 if ((branch_ctx
10685 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10686 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10687 || (label_ctx
10688 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10689 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10690 cilkplus_block = true;
10693 /* If it's obvious we have an invalid entry, be specific about the error. */
10694 if (branch_ctx == NULL)
10696 if (cilkplus_block)
10697 error ("invalid entry to Cilk Plus structured block");
10698 else
10699 error ("invalid entry to OpenMP structured block");
10701 else
10703 /* Otherwise, be vague and lazy, but efficient. */
10704 if (cilkplus_block)
10705 error ("invalid branch to/from a Cilk Plus structured block");
10706 else
10707 error ("invalid branch to/from an OpenMP structured block");
10710 gsi_replace (gsi_p, gimple_build_nop (), false);
10711 return true;
10714 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10715 where each label is found. */
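/* For instance, the two passes below cooperate to reject code like:

     #pragma omp parallel
     {
       goto fail;     // "invalid branch to/from an OpenMP structured block"
     }
   fail:;
*/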
10717 static tree
10718 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10719 struct walk_stmt_info *wi)
10721 gimple context = (gimple) wi->info;
10722 gimple inner_context;
10723 gimple stmt = gsi_stmt (*gsi_p);
10725 *handled_ops_p = true;
10727 switch (gimple_code (stmt))
10729 WALK_SUBSTMTS;
10731 case GIMPLE_OMP_PARALLEL:
10732 case GIMPLE_OMP_TASK:
10733 case GIMPLE_OMP_SECTIONS:
10734 case GIMPLE_OMP_SINGLE:
10735 case GIMPLE_OMP_SECTION:
10736 case GIMPLE_OMP_MASTER:
10737 case GIMPLE_OMP_ORDERED:
10738 case GIMPLE_OMP_CRITICAL:
10739 case GIMPLE_OMP_TARGET:
10740 case GIMPLE_OMP_TEAMS:
10741 case GIMPLE_OMP_TASKGROUP:
10742 /* The minimal context here is just the current OMP construct. */
10743 inner_context = stmt;
10744 wi->info = inner_context;
10745 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10746 wi->info = context;
10747 break;
10749 case GIMPLE_OMP_FOR:
10750 inner_context = stmt;
10751 wi->info = inner_context;
10752 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10753 walk them. */
10754 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10755 diagnose_sb_1, NULL, wi);
10756 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10757 wi->info = context;
10758 break;
10760 case GIMPLE_LABEL:
10761 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
10762 (splay_tree_value) context);
10763 break;
10765 default:
10766 break;
10769 return NULL_TREE;
10772 /* Pass 2: Check each branch and see if its context differs from that of
10773 the destination label's context. */
10775 static tree
10776 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10777 struct walk_stmt_info *wi)
10779 gimple context = (gimple) wi->info;
10780 splay_tree_node n;
10781 gimple stmt = gsi_stmt (*gsi_p);
10783 *handled_ops_p = true;
10785 switch (gimple_code (stmt))
10787 WALK_SUBSTMTS;
10789 case GIMPLE_OMP_PARALLEL:
10790 case GIMPLE_OMP_TASK:
10791 case GIMPLE_OMP_SECTIONS:
10792 case GIMPLE_OMP_SINGLE:
10793 case GIMPLE_OMP_SECTION:
10794 case GIMPLE_OMP_MASTER:
10795 case GIMPLE_OMP_ORDERED:
10796 case GIMPLE_OMP_CRITICAL:
10797 case GIMPLE_OMP_TARGET:
10798 case GIMPLE_OMP_TEAMS:
10799 case GIMPLE_OMP_TASKGROUP:
10800 wi->info = stmt;
10801 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10802 wi->info = context;
10803 break;
10805 case GIMPLE_OMP_FOR:
10806 wi->info = stmt;
10807 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10808 walk them. */
10809 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10810 diagnose_sb_2, NULL, wi);
10811 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10812 wi->info = context;
10813 break;
10815 case GIMPLE_COND:
10817 tree lab = gimple_cond_true_label (stmt);
10818 if (lab)
10820 n = splay_tree_lookup (all_labels,
10821 (splay_tree_key) lab);
10822 diagnose_sb_0 (gsi_p, context,
10823 n ? (gimple) n->value : NULL);
10825 lab = gimple_cond_false_label (stmt);
10826 if (lab)
10828 n = splay_tree_lookup (all_labels,
10829 (splay_tree_key) lab);
10830 diagnose_sb_0 (gsi_p, context,
10831 n ? (gimple) n->value : NULL);
10834 break;
10836 case GIMPLE_GOTO:
10838 tree lab = gimple_goto_dest (stmt);
10839 if (TREE_CODE (lab) != LABEL_DECL)
10840 break;
10842 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10843 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10845 break;
10847 case GIMPLE_SWITCH:
10849 unsigned int i;
10850 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
10852 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
10853 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10854 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10855 break;
10858 break;
10860 case GIMPLE_RETURN:
10861 diagnose_sb_0 (gsi_p, context, NULL);
10862 break;
10864 default:
10865 break;
10868 return NULL_TREE;
10871 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10872 codes. */
10873 bool
10874 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
10875 int *region_idx)
10877 gimple last = last_stmt (bb);
10878 enum gimple_code code = gimple_code (last);
10879 struct omp_region *cur_region = *region;
10880 bool fallthru = false;
10882 switch (code)
10884 case GIMPLE_OMP_PARALLEL:
10885 case GIMPLE_OMP_TASK:
10886 case GIMPLE_OMP_FOR:
10887 case GIMPLE_OMP_SINGLE:
10888 case GIMPLE_OMP_TEAMS:
10889 case GIMPLE_OMP_MASTER:
10890 case GIMPLE_OMP_TASKGROUP:
10891 case GIMPLE_OMP_ORDERED:
10892 case GIMPLE_OMP_CRITICAL:
10893 case GIMPLE_OMP_SECTION:
10894 cur_region = new_omp_region (bb, code, cur_region);
10895 fallthru = true;
10896 break;
10898 case GIMPLE_OMP_TARGET:
10899 cur_region = new_omp_region (bb, code, cur_region);
10900 fallthru = true;
10901 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
10902 cur_region = cur_region->outer;
10903 break;
10905 case GIMPLE_OMP_SECTIONS:
10906 cur_region = new_omp_region (bb, code, cur_region);
10907 fallthru = true;
10908 break;
10910 case GIMPLE_OMP_SECTIONS_SWITCH:
10911 fallthru = false;
10912 break;
10914 case GIMPLE_OMP_ATOMIC_LOAD:
10915 case GIMPLE_OMP_ATOMIC_STORE:
10916 fallthru = true;
10917 break;
10919 case GIMPLE_OMP_RETURN:
10920 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
10921 somewhere other than the next block. This will be
10922 created later. */
10923 cur_region->exit = bb;
10924 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
10925 cur_region = cur_region->outer;
10926 break;
10928 case GIMPLE_OMP_CONTINUE:
10929 cur_region->cont = bb;
10930 switch (cur_region->type)
10932 case GIMPLE_OMP_FOR:
10933 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
10934 succs edges as abnormal to prevent splitting
10935 them. */
10936 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
10937 /* Make the loopback edge. */
10938 make_edge (bb, single_succ (cur_region->entry),
10939 EDGE_ABNORMAL);
10941 /* Create an edge from GIMPLE_OMP_FOR to exit, which
10942 corresponds to the case that the body of the loop
10943 is not executed at all. */
10944 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
10945 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
10946 fallthru = false;
10947 break;
10949 case GIMPLE_OMP_SECTIONS:
10950 /* Wire up the edges into and out of the nested sections. */
10952 basic_block switch_bb = single_succ (cur_region->entry);
10954 struct omp_region *i;
10955 for (i = cur_region->inner; i ; i = i->next)
10957 gcc_assert (i->type == GIMPLE_OMP_SECTION);
10958 make_edge (switch_bb, i->entry, 0);
10959 make_edge (i->exit, bb, EDGE_FALLTHRU);
10962 /* Make the loopback edge to the block with
10963 GIMPLE_OMP_SECTIONS_SWITCH. */
10964 make_edge (bb, switch_bb, 0);
10966 /* Make the edge from the switch to exit. */
10967 make_edge (switch_bb, bb->next_bb, 0);
10968 fallthru = false;
10970 break;
10972 default:
10973 gcc_unreachable ();
10975 break;
10977 default:
10978 gcc_unreachable ();
10981 if (*region != cur_region)
10983 *region = cur_region;
10984 if (cur_region)
10985 *region_idx = cur_region->entry->index;
10986 else
10987 *region_idx = 0;
10990 return fallthru;
10993 static unsigned int
10994 diagnose_omp_structured_block_errors (void)
10996 struct walk_stmt_info wi;
10997 gimple_seq body = gimple_body (current_function_decl);
10999 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
11001 memset (&wi, 0, sizeof (wi));
11002 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
11004 memset (&wi, 0, sizeof (wi));
11005 wi.want_locations = true;
11006 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
11008 gimple_set_body (current_function_decl, body);
11010 splay_tree_delete (all_labels);
11011 all_labels = NULL;
11013 return 0;
11016 namespace {
11018 const pass_data pass_data_diagnose_omp_blocks =
11020 GIMPLE_PASS, /* type */
11021 "*diagnose_omp_blocks", /* name */
11022 OPTGROUP_NONE, /* optinfo_flags */
11023 TV_NONE, /* tv_id */
11024 PROP_gimple_any, /* properties_required */
11025 0, /* properties_provided */
11026 0, /* properties_destroyed */
11027 0, /* todo_flags_start */
11028 0, /* todo_flags_finish */
11031 class pass_diagnose_omp_blocks : public gimple_opt_pass
11033 public:
11034 pass_diagnose_omp_blocks (gcc::context *ctxt)
11035 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
11038 /* opt_pass methods: */
11039 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
11040 virtual unsigned int execute (function *)
11042 return diagnose_omp_structured_block_errors ();
11045 }; // class pass_diagnose_omp_blocks
11047 } // anon namespace
11049 gimple_opt_pass *
11050 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
11052 return new pass_diagnose_omp_blocks (ctxt);
11055 /* SIMD clone supporting code. */
11057 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
11058 of arguments to reserve space for. */
11060 static struct cgraph_simd_clone *
11061 simd_clone_struct_alloc (int nargs)
11063 struct cgraph_simd_clone *clone_info;
11064 size_t len = (sizeof (struct cgraph_simd_clone)
11065 + nargs * sizeof (struct cgraph_simd_clone_arg));
11066 clone_info = (struct cgraph_simd_clone *)
11067 ggc_internal_cleared_alloc (len);
11068 return clone_info;
11071 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
11073 static inline void
11074 simd_clone_struct_copy (struct cgraph_simd_clone *to,
11075 struct cgraph_simd_clone *from)
11077 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
11078 + ((from->nargs - from->inbranch)
11079 * sizeof (struct cgraph_simd_clone_arg))));
11082 /* Return a vector of the parameter types of function FNDECL. This uses
11083 TYPE_ARG_TYPES if available, otherwise falls back to the types of
11084 the DECL_ARGUMENTS chain. */
11086 vec<tree>
11087 simd_clone_vector_of_formal_parm_types (tree fndecl)
11089 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
11090 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
11091 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
11092 unsigned int i;
11093 tree arg;
11094 FOR_EACH_VEC_ELT (args, i, arg)
11095 args[i] = TREE_TYPE (args[i]);
11096 return args;
11099 /* Given a simd function in NODE, extract the simd specific
11100 information from the OMP clauses passed in CLAUSES, and return
11101 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
11102 is set to TRUE if the `inbranch' or `notinbranch' clause was specified,
11103 otherwise it is set to FALSE. */
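/* For example, given

     #pragma omp declare simd notinbranch simdlen(8) uniform(a) linear(b:1)
     int foo (int *a, int b);

   this records simdlen 8, marks argument 0 uniform and argument 1
   linear with constant step 1, and sets *INBRANCH_SPECIFIED because
   notinbranch was given explicitly.  */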
11105 static struct cgraph_simd_clone *
11106 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
11107 bool *inbranch_specified)
11109 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
11110 tree t;
11111 int n;
11112 *inbranch_specified = false;
11114 n = args.length ();
11115 if (n > 0 && args.last () == void_type_node)
11116 n--;
11118 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
11119 be cloned have a distinctive artificial attribute ("cilk simd
11120 function") in addition to "omp declare simd". */
11121 bool cilk_clone
11122 = (flag_cilkplus
11123 && lookup_attribute ("cilk simd function",
11124 DECL_ATTRIBUTES (node->decl)));
11126 /* Allocate one more than needed just in case this is an in-branch
11127 clone which will require a mask argument. */
11128 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
11129 clone_info->nargs = n;
11130 clone_info->cilk_elemental = cilk_clone;
11132 if (!clauses)
11134 args.release ();
11135 return clone_info;
11137 clauses = TREE_VALUE (clauses);
11138 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
11139 return clone_info;
11141 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
11143 switch (OMP_CLAUSE_CODE (t))
11145 case OMP_CLAUSE_INBRANCH:
11146 clone_info->inbranch = 1;
11147 *inbranch_specified = true;
11148 break;
11149 case OMP_CLAUSE_NOTINBRANCH:
11150 clone_info->inbranch = 0;
11151 *inbranch_specified = true;
11152 break;
11153 case OMP_CLAUSE_SIMDLEN:
11154 clone_info->simdlen
11155 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
11156 break;
11157 case OMP_CLAUSE_LINEAR:
11159 tree decl = OMP_CLAUSE_DECL (t);
11160 tree step = OMP_CLAUSE_LINEAR_STEP (t);
11161 int argno = TREE_INT_CST_LOW (decl);
11162 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
11164 clone_info->args[argno].arg_type
11165 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
11166 clone_info->args[argno].linear_step = tree_to_shwi (step);
11167 gcc_assert (clone_info->args[argno].linear_step >= 0
11168 && clone_info->args[argno].linear_step < n);
11170 else
11172 if (POINTER_TYPE_P (args[argno]))
11173 step = fold_convert (ssizetype, step);
11174 if (!tree_fits_shwi_p (step))
11176 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11177 "ignoring large linear step");
11178 args.release ();
11179 return NULL;
11181 else if (integer_zerop (step))
11183 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11184 "ignoring zero linear step");
11185 args.release ();
11186 return NULL;
11188 else
11190 clone_info->args[argno].arg_type
11191 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
11192 clone_info->args[argno].linear_step = tree_to_shwi (step);
11195 break;
11197 case OMP_CLAUSE_UNIFORM:
11199 tree decl = OMP_CLAUSE_DECL (t);
11200 int argno = tree_to_uhwi (decl);
11201 clone_info->args[argno].arg_type
11202 = SIMD_CLONE_ARG_TYPE_UNIFORM;
11203 break;
11205 case OMP_CLAUSE_ALIGNED:
11207 tree decl = OMP_CLAUSE_DECL (t);
11208 int argno = tree_to_uhwi (decl);
11209 clone_info->args[argno].alignment
11210 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
11211 break;
11213 default:
11214 break;
11217 args.release ();
11218 return clone_info;
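
/* For illustration only: given
     #pragma omp declare simd simdlen(8) uniform(a) linear(b:4) notinbranch
     int foo (int *a, int b);
   the routine above records nargs == 2, simdlen == 8, inbranch == 0
   (with *INBRANCH_SPECIFIED set), args[0].arg_type
   == SIMD_CLONE_ARG_TYPE_UNIFORM and args[1].arg_type
   == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with linear_step == 4.  */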

/* Given a SIMD clone in NODE, calculate the characteristic data
   type and return the corresponding type.  The characteristic data
   type is computed as described in the Intel Vector ABI.  */

static tree
simd_clone_compute_base_data_type (struct cgraph_node *node,
				   struct cgraph_simd_clone *clone_info)
{
  tree type = integer_type_node;
  tree fndecl = node->decl;

  /* a) For non-void function, the characteristic data type is the
	return type.  */
  if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
    type = TREE_TYPE (TREE_TYPE (fndecl));

  /* b) If the function has any non-uniform, non-linear parameters,
	then the characteristic data type is the type of the first
	such parameter.  */
  else
    {
      vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
      for (unsigned int i = 0; i < clone_info->nargs; ++i)
	if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
	  {
	    type = map[i];
	    break;
	  }
      map.release ();
    }

  /* c) If the characteristic data type determined by a) or b) above
	is a struct, union, or class type which is pass-by-value (except
	for the type that maps to the built-in complex data type), the
	characteristic data type is int.  */
  if (RECORD_OR_UNION_TYPE_P (type)
      && !aggregate_value_p (type, NULL)
      && TREE_CODE (type) != COMPLEX_TYPE)
    return integer_type_node;

  /* d) If none of the above three classes is applicable, the
	characteristic data type is int.  */

  return type;

  /* e) For Intel Xeon Phi native and offload compilation, if the
	resulting characteristic data type is 8-bit or 16-bit integer
	data type, the characteristic data type is int.  */
  /* Well, we don't handle Xeon Phi yet.  */
}
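
/* Produce the mangled name of a SIMD clone as described in the Intel
   Vector ABI: "_ZGV", the ISA mangling letter chosen by the target
   hook, 'M' (masked) or 'N' (unmasked), the simdlen, one letter per
   argument ('u' uniform, 'l' linear, 's' variable step, 'v' vector,
   plus 'a'<alignment> when aligned), an underscore and the original
   assembler name.  As an illustrative example (the ISA letter is
   target-dependent),
     #pragma omp declare simd notinbranch uniform(a) linear(b)
     int foo (int a, int b);
   with simdlen 4 and ISA letter 'b' mangles to "_ZGVbN4ul_foo".
   Returns NULL_TREE if a clone with the same mangled name already
   exists.  */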

static tree
simd_clone_mangle (struct cgraph_node *node,
		   struct cgraph_simd_clone *clone_info)
{
  char vecsize_mangle = clone_info->vecsize_mangle;
  char mask = clone_info->inbranch ? 'M' : 'N';
  unsigned int simdlen = clone_info->simdlen;
  unsigned int n;
  pretty_printer pp;

  gcc_assert (vecsize_mangle && simdlen);

  pp_string (&pp, "_ZGV");
  pp_character (&pp, vecsize_mangle);
  pp_character (&pp, mask);
  pp_decimal_int (&pp, simdlen);

  for (n = 0; n < clone_info->nargs; ++n)
    {
      struct cgraph_simd_clone_arg arg = clone_info->args[n];

      if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
	pp_character (&pp, 'u');
      else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	{
	  gcc_assert (arg.linear_step != 0);
	  pp_character (&pp, 'l');
	  if (arg.linear_step > 1)
	    pp_unsigned_wide_integer (&pp, arg.linear_step);
	  else if (arg.linear_step < 0)
	    {
	      pp_character (&pp, 'n');
	      pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
					      arg.linear_step));
	    }
	}
      else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
	{
	  pp_character (&pp, 's');
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	}
      else
	pp_character (&pp, 'v');
      if (arg.alignment)
	{
	  pp_character (&pp, 'a');
	  pp_decimal_int (&pp, arg.alignment);
	}
    }

  pp_underscore (&pp);
  pp_string (&pp,
	     IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
  const char *str = pp_formatted_text (&pp);

  /* If there already is a SIMD clone with the same mangled name, don't
     add another one.  This can happen e.g. for
     #pragma omp declare simd
     #pragma omp declare simd simdlen(8)
     int foo (int, int);
     if the simdlen is assumed to be 8 for the first one, etc.  */
  for (struct cgraph_node *clone = node->simd_clones; clone;
       clone = clone->simdclone->next_clone)
    if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
		str) == 0)
      return NULL_TREE;

  return get_identifier (str);
}

/* Create a simd clone of OLD_NODE and return it.  */

static struct cgraph_node *
simd_clone_create (struct cgraph_node *old_node)
{
  struct cgraph_node *new_node;
  if (old_node->definition)
    {
      if (!old_node->has_gimple_body_p ())
	return NULL;
      old_node->get_body ();
      new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
							   false, NULL, NULL,
							   "simdclone");
    }
  else
    {
      tree old_decl = old_node->decl;
      tree new_decl = copy_node (old_node->decl);
      DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
      SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
      SET_DECL_RTL (new_decl, NULL);
      DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
      DECL_STATIC_DESTRUCTOR (new_decl) = 0;
      new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
      symtab->call_cgraph_insertion_hooks (new_node);
    }
  if (new_node == NULL)
    return new_node;

  TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);

  /* The function cgraph_function_versioning () will force the new
     symbol local.  Undo this, and inherit external visibility from
     the old node.  */
  new_node->local.local = old_node->local.local;
  new_node->externally_visible = old_node->externally_visible;

  return new_node;
}

/* Adjust the return type of the given function to its appropriate
   vector counterpart.  Returns a simd array to be used throughout the
   function as a return value.  */

static tree
simd_clone_adjust_return_type (struct cgraph_node *node)
{
  tree fndecl = node->decl;
  tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
  unsigned int veclen;
  tree t;

  /* Adjust the function return type.  */
  if (orig_rettype == void_type_node)
    return NULL_TREE;
  TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
  if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
      || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
    veclen = node->simdclone->vecsize_int;
  else
    veclen = node->simdclone->vecsize_float;
  veclen /= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))));
  if (veclen > node->simdclone->simdlen)
    veclen = node->simdclone->simdlen;
  if (veclen == node->simdclone->simdlen)
    TREE_TYPE (TREE_TYPE (fndecl))
      = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)),
			   node->simdclone->simdlen);
  else
    {
      t = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)), veclen);
      t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
      TREE_TYPE (TREE_TYPE (fndecl)) = t;
    }
  if (!node->definition)
    return NULL_TREE;

  t = DECL_RESULT (fndecl);
  /* Adjust the DECL_RESULT.  */
  gcc_assert (TREE_TYPE (t) != void_type_node);
  TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
  relayout_decl (t);

  tree atype = build_array_type_nelts (orig_rettype,
				       node->simdclone->simdlen);
  if (veclen != node->simdclone->simdlen)
    return build1 (VIEW_CONVERT_EXPR, atype, t);

  /* Set up a SIMD array to use as the return value.  */
  tree retval = create_tmp_var_raw (atype, "retval");
  gimple_add_tmp_var (retval);
  return retval;
}
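
/* For illustration only: for a float return value with simdlen 8 and
   a 256-bit vecsize_float, veclen is 8 and the return type becomes a
   single V8SF vector; with a 128-bit vecsize_float, veclen is 4 and
   the return type becomes an array of two V4SF vectors, accessed
   through the VIEW_CONVERT_EXPR of the DECL_RESULT returned above.
   The concrete numbers depend on the target's simd_clone hooks.  */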

/* Each vector argument has a corresponding array to be used locally
   as part of the eventual loop.  Create such a temporary array and
   return it.

   PREFIX is the prefix to be used for the temporary.

   TYPE is the inner element type.

   SIMDLEN is the number of elements.  */

static tree
create_tmp_simd_array (const char *prefix, tree type, int simdlen)
{
  tree atype = build_array_type_nelts (type, simdlen);
  tree avar = create_tmp_var_raw (atype, prefix);
  gimple_add_tmp_var (avar);
  return avar;
}

/* Modify the function argument types to their corresponding vector
   counterparts if appropriate.  Also, create one array for each simd
   argument to be used locally when using the function arguments as
   part of the loop.

   NODE is the function whose arguments are to be adjusted.

   Returns an adjustment vector that will be filled describing how the
   argument types will be adjusted.  */

static ipa_parm_adjustment_vec
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
  vec<tree> args;
  ipa_parm_adjustment_vec adjustments;

  if (node->definition)
    args = ipa_get_vector_of_formal_parms (node->decl);
  else
    args = simd_clone_vector_of_formal_parm_types (node->decl);
  adjustments.create (args.length ());
  unsigned i, j, veclen;
  struct ipa_parm_adjustment adj;
  for (i = 0; i < node->simdclone->nargs; ++i)
    {
      memset (&adj, 0, sizeof (adj));
      tree parm = args[i];
      tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
      adj.base_index = i;
      adj.base = parm;

      node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
      node->simdclone->args[i].orig_type = parm_type;

      if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
	{
	  /* No adjustment necessary for scalar arguments.  */
	  adj.op = IPA_PARM_OP_COPY;
	}
      else
	{
	  if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
	    veclen = node->simdclone->vecsize_int;
	  else
	    veclen = node->simdclone->vecsize_float;
	  veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
	  if (veclen > node->simdclone->simdlen)
	    veclen = node->simdclone->simdlen;
	  adj.arg_prefix = "simd";
	  adj.type = build_vector_type (parm_type, veclen);
	  node->simdclone->args[i].vector_type = adj.type;
	  for (j = veclen; j < node->simdclone->simdlen; j += veclen)
	    {
	      adjustments.safe_push (adj);
	      if (j == veclen)
		{
		  memset (&adj, 0, sizeof (adj));
		  adj.op = IPA_PARM_OP_NEW;
		  adj.arg_prefix = "simd";
		  adj.base_index = i;
		  adj.type = node->simdclone->args[i].vector_type;
		}
	    }

	  if (node->definition)
	    node->simdclone->args[i].simd_array
	      = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
				       parm_type, node->simdclone->simdlen);
	}
      adjustments.safe_push (adj);
    }

  if (node->simdclone->inbranch)
    {
      tree base_type
	= simd_clone_compute_base_data_type (node->simdclone->origin,
					     node->simdclone);

      memset (&adj, 0, sizeof (adj));
      adj.op = IPA_PARM_OP_NEW;
      adj.arg_prefix = "mask";

      adj.base_index = i;
      if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
	veclen = node->simdclone->vecsize_int;
      else
	veclen = node->simdclone->vecsize_float;
      veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
      if (veclen > node->simdclone->simdlen)
	veclen = node->simdclone->simdlen;
      adj.type = build_vector_type (base_type, veclen);
      adjustments.safe_push (adj);

      for (j = veclen; j < node->simdclone->simdlen; j += veclen)
	adjustments.safe_push (adj);

      /* We have previously allocated one extra entry for the mask.  Use
	 it and fill it.  */
      struct cgraph_simd_clone *sc = node->simdclone;
      sc->nargs++;
      if (node->definition)
	{
	  sc->args[i].orig_arg
	    = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
	  sc->args[i].simd_array
	    = create_tmp_simd_array ("mask", base_type, sc->simdlen);
	}
      sc->args[i].orig_type = base_type;
      sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
    }

  if (node->definition)
    ipa_modify_formal_parameters (node->decl, adjustments);
  else
    {
      tree new_arg_types = NULL_TREE, new_reversed;
      bool last_parm_void = false;
      if (args.length () > 0 && args.last () == void_type_node)
	last_parm_void = true;

      gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
      j = adjustments.length ();
      for (i = 0; i < j; i++)
	{
	  struct ipa_parm_adjustment *adj = &adjustments[i];
	  tree ptype;
	  if (adj->op == IPA_PARM_OP_COPY)
	    ptype = args[adj->base_index];
	  else
	    ptype = adj->type;
	  new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
	}
      new_reversed = nreverse (new_arg_types);
      if (last_parm_void)
	{
	  if (new_reversed)
	    TREE_CHAIN (new_arg_types) = void_list_node;
	  else
	    new_reversed = void_list_node;
	}

      tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
      TYPE_ARG_TYPES (new_type) = new_reversed;
      TREE_TYPE (node->decl) = new_type;

      adjustments.release ();
    }
  args.release ();
  return adjustments;
}
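
/* For illustration only: a float vector argument with simdlen 8 and
   veclen 4 is rewritten into two V4SF parameters (the extra ones
   pushed as IPA_PARM_OP_NEW adjustments) plus, for definitions, a
   local float[8] simd array, while uniform and linear arguments are
   passed through unchanged as IPA_PARM_OP_COPY.  Again the concrete
   numbers depend on the target hooks.  */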

/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.  */

static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_parm_adjustment_vec adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      if (adjustments[j].op == IPA_PARM_OP_COPY)
	continue;

      node->simdclone->args[i].vector_arg = arg;

      tree array = node->simdclone->args[i].simd_array;
      if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
	{
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      elemsize
		= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}
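
/* For illustration only: for a float argument expanded into two V4SF
   parameters with simdlen 8, the loop above emits two vector stores,
   roughly
     MEM[(V4SF *)&foo.simd_array] = arg1;
     MEM[(V4SF *)&foo.simd_array + 16B] = arg2;
   so the body can address individual lanes through the array.  */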

/* Callback info for ipa_simd_modify_stmt_ops below.  */

struct modify_stmt_info {
  ipa_parm_adjustment_vec adjustments;
  gimple stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};

/* Callback for walk_gimple_op.

   Adjust operands from a given statement as specified in the
   adjustments vector in the callback data.  */

static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  struct ipa_parm_adjustment *cand = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
  else
    {
      if (TYPE_P (*tp))
	*walk_subtrees = 0;
    }

  tree repl = NULL_TREE;
  if (cand)
    repl = unshare_expr (cand->new_decl);
  else
    {
      if (tp != orig_tp)
	{
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }

  if (tp != orig_tp)
    {
      repl = build_fold_addr_expr (repl);
      gimple stmt
	= gimple_build_assign (make_ssa_name (TREE_TYPE (repl), NULL), repl);
      repl = gimple_assign_lhs (stmt);
      gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;

  info->modified = true;
  return NULL_TREE;
}
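
/* For illustration only: when a candidate use appears underneath an
   ADDR_EXPR such as &parm, the array reference cannot be substituted
   in place, so the walk above materializes
     tmp_N = &simd_array[iter];
   in front of the statement and rewrites the operand to tmp_N.  */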

/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.

   This function will replace all function argument uses with their
   corresponding simd array elements, and adjust the return values
   accordingly.  */

static void
ipa_simd_modify_function_body (struct cgraph_node *node,
			       ipa_parm_adjustment_vec adjustments,
			       tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j, l;

  /* Re-use the adjustments array, but this time use it to replace
     every function argument use to an offset into the corresponding
     simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg)
	continue;

      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      adjustments[j].new_decl
	= build4 (ARRAY_REF,
		  basetype,
		  node->simdclone->args[i].simd_array,
		  iter,
		  NULL_TREE, NULL_TREE);
      if (adjustments[j].op == IPA_PARM_OP_NONE
	  && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
	j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
    }

  l = adjustments.length ();
  for (i = 1; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (name
	  && SSA_NAME_VAR (name)
	  && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
	{
	  for (j = 0; j < l; j++)
	    if (SSA_NAME_VAR (name) == adjustments[j].base
		&& adjustments[j].new_decl)
	      {
		tree base_var;
		if (adjustments[j].new_ssa_base == NULL_TREE)
		  {
		    base_var
		      = copy_var_decl (adjustments[j].base,
				       DECL_NAME (adjustments[j].base),
				       TREE_TYPE (adjustments[j].base));
		    adjustments[j].new_ssa_base = base_var;
		  }
		else
		  base_var = adjustments[j].new_ssa_base;
		if (SSA_NAME_IS_DEFAULT_DEF (name))
		  {
		    bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
		    gimple_stmt_iterator gsi = gsi_after_labels (bb);
		    tree new_decl = unshare_expr (adjustments[j].new_decl);
		    set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
		    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
		    SSA_NAME_IS_DEFAULT_DEF (name) = 0;
		    gimple stmt = gimple_build_assign (name, new_decl);
		    gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		  }
		else
		  SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	      }
	}
    }

  struct modify_stmt_info info;
  info.adjustments = adjustments;

  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;

      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  info.stmt = stmt;
	  struct walk_stmt_info wi;

	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);

	  if (gimple_code (stmt) == GIMPLE_RETURN)
	    {
	      tree retval = gimple_return_retval (stmt);
	      if (!retval)
		{
		  gsi_remove (&gsi, true);
		  continue;
		}

	      /* Replace `return foo' with `retval_array[iter] = foo'.  */
	      tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
				 retval_array, iter, NULL, NULL);
	      stmt = gimple_build_assign (ref, retval);
	      gsi_replace (&gsi, stmt, true);
	      info.modified = true;
	    }

	  if (info.modified)
	    {
	      update_stmt (stmt);
	      if (maybe_clean_eh_stmt (stmt))
		gimple_purge_dead_eh_edges (gimple_bb (stmt));
	    }
	  gsi_next (&gsi);
	}
    }
}
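
/* For illustration only: after this traversal a use of parameter B
   reads B.simd_array[iter] instead, the default definition of B is
   re-seeded from that array element at function entry, and
     return foo_N;
   has become
     retval_array[iter] = foo_N;
   so that the loop built by simd_clone_adjust below stores one
   element per iteration.  */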

/* Adjust the argument types in NODE to their appropriate vector
   counterparts, and wrap the function body in a loop iterating over
   the SIMD lanes so the vectorizer can turn it into SIMD code.  */

static void
simd_clone_adjust (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  targetm.simd_clone.adjust (node);

  tree retval = simd_clone_adjust_return_type (node);
  ipa_parm_adjustment_vec adjustments
    = simd_clone_adjust_argument_types (node);

  push_gimplify_context ();

  gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);

  /* Adjust all uses of vector arguments accordingly.  Adjust all
     return values accordingly.  */
  tree iter = create_tmp_var (unsigned_type_node, "iter");
  tree iter1 = make_ssa_name (iter, NULL);
  tree iter2 = make_ssa_name (iter, NULL);
  ipa_simd_modify_function_body (node, adjustments, retval, iter1);

  /* Initialize the iteration variable.  */
  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  basic_block body_bb = split_block_after_labels (entry_bb)->dest;
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  /* Insert the SIMD array and iv initialization at function
     entry.  */
  gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);

  pop_gimplify_context (NULL);

  /* Create a new BB right before the original exit BB, to hold the
     iteration increment and the condition/branch.  */
  basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
  basic_block incr_bb = create_empty_bb (orig_exit);
  add_bb_to_loop (incr_bb, body_bb->loop_father);
  /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty
     flag.  Set it now to be a FALLTHRU_EDGE.  */
  gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
  EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
  for (unsigned i = 0;
       i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
    {
      edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
      redirect_edge_succ (e, incr_bb);
    }
  edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
  e->probability = REG_BR_PROB_BASE;
  gsi = gsi_last_bb (incr_bb);
  gimple g = gimple_build_assign_with_ops (PLUS_EXPR, iter2, iter1,
					   build_int_cst (unsigned_type_node,
							  1));
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Mostly annotate the loop for the vectorizer (the rest is done below).  */
  struct loop *loop = alloc_loop ();
  cfun->has_force_vectorize_loops = true;
  loop->safelen = node->simdclone->simdlen;
  loop->force_vectorize = true;
  loop->header = body_bb;

  /* Branch around the body if the mask applies.  */
  if (node->simdclone->inbranch)
    {
      gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
      tree mask_array
	= node->simdclone->args[node->simdclone->nargs - 1].simd_array;
      tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)), NULL);
      tree aref = build4 (ARRAY_REF,
			  TREE_TYPE (TREE_TYPE (mask_array)),
			  mask_array, iter1,
			  NULL, NULL);
      g = gimple_build_assign (mask, aref);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
      if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
	{
	  aref = build1 (VIEW_CONVERT_EXPR,
			 build_nonstandard_integer_type (bitsize, 0), mask);
	  mask = make_ssa_name (TREE_TYPE (aref), NULL);
	  g = gimple_build_assign (mask, aref);
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	}

      g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
			     NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
      FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
    }

  /* Generate the condition.  */
  g = gimple_build_cond (LT_EXPR,
			 iter2,
			 build_int_cst (unsigned_type_node,
					node->simdclone->simdlen),
			 NULL, NULL);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
  e = split_block (incr_bb, gsi_stmt (gsi));
  basic_block latch_bb = e->dest;
  basic_block new_exit_bb;
  new_exit_bb = split_block (latch_bb, NULL)->dest;
  loop->latch = latch_bb;

  redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);

  make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
  /* The successor of incr_bb is already pointing to latch_bb; just
     change the flags.
     make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE);  */
  FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;

  gimple phi = create_phi_node (iter1, body_bb);
  edge preheader_edge = find_edge (entry_bb, body_bb);
  edge latch_edge = single_succ_edge (latch_bb);
  add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
	       UNKNOWN_LOCATION);
  add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);

  /* Generate the new return.  */
  gsi = gsi_last_bb (new_exit_bb);
  if (retval
      && TREE_CODE (retval) == VIEW_CONVERT_EXPR
      && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
    retval = TREE_OPERAND (retval, 0);
  else if (retval)
    {
      retval = build1 (VIEW_CONVERT_EXPR,
		       TREE_TYPE (TREE_TYPE (node->decl)),
		       retval);
      retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
					 false, GSI_CONTINUE_LINKING);
    }
  g = gimple_build_return (retval);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Handle aligned clauses by replacing default defs of the aligned
     uniform args with __builtin_assume_aligned (arg_N(D), alignment)
     lhs.  Handle linear by adding PHIs.  */
  for (unsigned i = 0; i < node->simdclone->nargs; i++)
    if (node->simdclone->args[i].alignment
	&& node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	&& (node->simdclone->args[i].alignment
	    & (node->simdclone->args[i].alignment - 1)) == 0
	&& TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
	   == POINTER_TYPE)
      {
	unsigned int alignment = node->simdclone->args[i].alignment;
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	if (def && !has_zero_uses (def))
	  {
	    tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
	    gimple_seq seq = NULL;
	    bool need_cvt = false;
	    gimple call
	      = gimple_build_call (fn, 2, def, size_int (alignment));
	    g = call;
	    if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
					    ptr_type_node))
	      need_cvt = true;
	    tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg, NULL);
	    gimple_call_set_lhs (g, t);
	    gimple_seq_add_stmt_without_update (&seq, g);
	    if (need_cvt)
	      {
		t = make_ssa_name (orig_arg, NULL);
		g = gimple_build_assign_with_ops (NOP_EXPR, t,
						  gimple_call_lhs (g),
						  NULL_TREE);
		gimple_seq_add_stmt_without_update (&seq, g);
	      }
	    gsi_insert_seq_on_edge_immediate
	      (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);

	    entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	    int freq = compute_call_stmt_bb_frequency (current_function_decl,
						       entry_bb);
	    node->create_edge (cgraph_node::get_create (fn),
			       call, entry_bb->count, freq);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    tree repl = gimple_get_lhs (g);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (is_gimple_debug (use_stmt) || use_stmt == call)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, repl);
	  }
      }
    else if (node->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
      {
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
		    || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
	if (def && !has_zero_uses (def))
	  {
	    iter1 = make_ssa_name (orig_arg, NULL);
	    iter2 = make_ssa_name (orig_arg, NULL);
	    phi = create_phi_node (iter1, body_bb);
	    add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
	    add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
	    enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
				  ? PLUS_EXPR : POINTER_PLUS_EXPR;
	    tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
			   ? TREE_TYPE (orig_arg) : sizetype;
	    tree addcst
	      = build_int_cst (addtype, node->simdclone->args[i].linear_step);
	    g = gimple_build_assign_with_ops (code, iter2, iter1, addcst);
	    gsi = gsi_last_bb (incr_bb);
	    gsi_insert_before (&gsi, g, GSI_SAME_STMT);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (use_stmt == phi)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, iter1);
	  }
      }

  calculate_dominance_info (CDI_DOMINATORS);
  add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);

  pop_cfun ();
}
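
/* For illustration only, the net effect of simd_clone_adjust on a
   definition is roughly (pseudo-GIMPLE, mask handling omitted):

     iter1 = PHI <0 (entry), iter2 (latch)>;
     ...original body, with argument uses rewritten to
	arg.simd_array[iter1] and returns storing to retval[iter1]...
     iter2 = iter1 + 1;
     if (iter2 < simdlen) goto latch; else goto new_exit;

   a loop the vectorizer is then forced to vectorize because safelen
   == simdlen and force_vectorize is set.  */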

/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  */

static void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl));
  if (attr == NULL_TREE
      || node->global.inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;

  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, where we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
	= simd_clone_clauses_extract (node, TREE_VALUE (attr),
				      &inbranch_clause_specified);
      if (clone_info == NULL)
	continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
	 1 (just one ISA of simd clones should be created) or higher
	 count of ISA variants.  In that case, clone_info is initialized
	 for the first ISA variant.  */
      int count
	= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
							  base_type, 0);
      if (count == 0)
	continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
	 also create one inbranch and one !inbranch clone of it.  */
      for (int i = 0; i < count * 2; i++)
	{
	  struct cgraph_simd_clone *clone = clone_info;
	  if (inbranch_clause_specified && (i & 1) != 0)
	    continue;

	  if (i != 0)
	    {
	      clone = simd_clone_struct_alloc (clone_info->nargs
					       + ((i & 1) != 0));
	      simd_clone_struct_copy (clone, clone_info);
	      /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
		 and simd_clone_adjust_argument_types did to the first
		 clone's info.  */
	      clone->nargs -= clone_info->inbranch;
	      clone->simdlen = orig_simdlen;
	      /* And call the target hook again to get the right ISA.  */
	      targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
							      base_type,
							      i / 2);
	      if ((i & 1) != 0)
		clone->inbranch = 1;
	    }

	  /* simd_clone_mangle might fail if such a clone has been created
	     already.  */
	  tree id = simd_clone_mangle (node, clone);
	  if (id == NULL_TREE)
	    continue;

	  /* Only when we are sure we want to create the clone actually
	     clone the function (or definitions) or create another
	     extern FUNCTION_DECL (for prototypes without definitions).  */
	  struct cgraph_node *n = simd_clone_create (node);
	  if (n == NULL)
	    continue;

	  n->simdclone = clone;
	  clone->origin = node;
	  clone->next_clone = NULL;
	  if (node->simd_clones == NULL)
	    {
	      clone->prev_clone = n;
	      node->simd_clones = n;
	    }
	  else
	    {
	      clone->prev_clone = node->simd_clones->simdclone->prev_clone;
	      clone->prev_clone->simdclone->next_clone = n;
	      node->simd_clones->simdclone->prev_clone = n;
	    }
	  symtab->change_decl_assembler_name (n->decl, id);
	  /* And finally adjust the return type, parameters and for
	     definitions also function body.  */
	  if (node->definition)
	    simd_clone_adjust (n);
	  else
	    {
	      simd_clone_adjust_return_type (n);
	      simd_clone_adjust_argument_types (n);
	    }
	}
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}
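
/* For illustration only: if the target hook reports two ISA variants
   and the pragma specifies neither inbranch nor notinbranch, the loop
   above creates four clones per "omp declare simd" attribute (each
   ISA in a masked and an unmasked flavor), skipping any whose mangled
   name already exists.  */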

/* Entry point for IPA simd clone creation pass.  */

static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}

namespace {

const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS,		/* type */
  "simdclone",			/* name */
  OPTGROUP_NONE,		/* optinfo_flags */
  TV_NONE,			/* tv_id */
  ( PROP_ssa | PROP_cfg ),	/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0,				/* todo_flags_finish */
};

class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone(gcc::context *ctxt)
    : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
}; // class pass_omp_simd_clone

bool
pass_omp_simd_clone::gate (function *)
{
  return ((flag_openmp || flag_openmp_simd
	   || flag_cilkplus
	   || (in_lto_p && !flag_wpa))
	  && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
}

} // anon namespace

simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}

#include "gt-omp-low.h"