/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "rtl.h"
#include "pointer-set.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
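/* As a rough illustration (the names below are those of the artificial
   decls this file creates),

	#pragma omp parallel shared (x)
	  x++;

   becomes, in the parent, roughly

	.omp_data_o.x = x;
	__builtin_GOMP_parallel (main._omp_fn.0, &.omp_data_o, 0, 0);
	x = .omp_data_o.x;

   while the outlined child function main._omp_fn.0 increments
   .omp_data_i->x through the received pointer.  */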
/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump during the omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
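/* WALK_SUBSTMTS is meant to be expanded inside the switch of a
   walk_gimple_stmt callback that declares HANDLED_OPS_P (see
   find_combined_for below for one such use), so that container
   statements are descended into rather than treated as handled.  */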
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD);
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
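/* For instance, for the (illustrative) loop

	#pragma omp for schedule (dynamic, 4)
	for (i = 0; i < n; i++)

   extract_omp_for_data fills in roughly fd.loop.v = i, fd.loop.n1 = 0,
   fd.loop.n2 = n, fd.loop.step = 1, fd.loop.cond_code = LT_EXPR,
   fd.sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC and fd.chunk_size = 4;
   a LE_EXPR or GE_EXPR condition would have been canonicalized to
   LT_EXPR or GT_EXPR by adjusting n2 as above.  */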
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (ws_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (ws_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
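/* So, roughly: for a combined parallel loop the vector built above is
   (n1, n2, step[, chunk_size]), each converted to long, and for
   combined parallel sections it is just the section count.  These
   become the extra arguments passed to the combined libgomp entry
   point when the expansion pass emits the call (sketch; the precise
   call is formed in pass_expand_omp).  */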
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
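/* In short: an aggregate, an addressable scalar, or anything shared
   from a task is passed by pointer; a non-addressable scalar shared
   by a parallel can use copy-in/copy-out semantics and so returns
   false here.  */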
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
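/* MASK is a bit set, as the code above shows: bit 0 installs the field
   in CTX->RECORD_TYPE / CTX->FIELD_MAP, bit 1 in the task
   CTX->SRECORD_TYPE / CTX->SFIELD_MAP, and bit 2 wraps the type in a
   double pointer (callers pass mask 7 for zero-bias array sections).
   E.g. install_var_field (decl, by_ref, 3, ctx) installs DECL in both
   records.  */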
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  if (ctx->outer)
		    scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		  decl = OMP_CLAUSE_DECL (c);

		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for omp child function.  Returns an identifier.  */

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  bool target_p = false;
  if (lookup_attribute ("omp declare target",
			DECL_ATTRIBUTES (current_function_decl)))
    target_p = true;
  else
    {
      omp_context *octx;
      for (octx = ctx; octx; octx = octx->outer)
	if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
	    && gimple_omp_target_kind (octx->stmt)
	       == GF_OMP_TARGET_KIND_REGION)
	  {
	    target_p = true;
	    break;
	  }
    }
  if (target_p)
    DECL_ATTRIBUTES (decl)
      = tree_cons (get_identifier ("omp declare target"),
		   NULL_TREE, DECL_ATTRIBUTES (decl));

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
1943 /* Callback for walk_gimple_seq. Check whether a combined parallel
1944 contains an OMP_FOR for which gimple_omp_for_combined_into_p is true. */
1946 static tree
1947 find_combined_for (gimple_stmt_iterator *gsi_p,
1948 bool *handled_ops_p,
1949 struct walk_stmt_info *wi)
1951 gimple stmt = gsi_stmt (*gsi_p);
1953 *handled_ops_p = true;
1954 switch (gimple_code (stmt))
1956 WALK_SUBSTMTS;
1958 case GIMPLE_OMP_FOR:
1959 if (gimple_omp_for_combined_into_p (stmt)
1960 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
1962 wi->info = stmt;
1963 return integer_zero_node;
1965 break;
1966 default:
1967 break;
1969 return NULL;
1972 /* Scan an OpenMP parallel directive. */
1974 static void
1975 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1977 omp_context *ctx;
1978 tree name;
1979 gimple stmt = gsi_stmt (*gsi);
1981 /* Ignore parallel directives with empty bodies, unless there
1982 are copyin clauses. */
1983 if (optimize > 0
1984 && empty_body_p (gimple_omp_body (stmt))
1985 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1986 OMP_CLAUSE_COPYIN) == NULL)
1988 gsi_replace (gsi, gimple_build_nop (), false);
1989 return;
1992 if (gimple_omp_parallel_combined_p (stmt))
1994 gimple for_stmt;
1995 struct walk_stmt_info wi;
1997 memset (&wi, 0, sizeof (wi));
1998 wi.val_only = true;
1999 walk_gimple_seq (gimple_omp_body (stmt),
2000 find_combined_for, NULL, &wi);
2001 for_stmt = (gimple) wi.info;
2002 if (for_stmt)
2004 struct omp_for_data fd;
2005 extract_omp_for_data (for_stmt, &fd, NULL);
2006 /* We need two temporaries with fd.loop.v type (istart/iend)
2007 and then (fd.collapse - 1) temporaries with the same
2008 type for the count2 ... countN-1 vars if the iteration count is not constant. */
2009 size_t count = 2, i;
2010 tree type = fd.iter_type;
2011 if (fd.collapse > 1
2012 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2013 count += fd.collapse - 1;
2014 for (i = 0; i < count; i++)
2016 tree temp = create_tmp_var (type, NULL);
2017 tree c = build_omp_clause (UNKNOWN_LOCATION,
2018 OMP_CLAUSE__LOOPTEMP_);
2019 insert_decl_map (&outer_ctx->cb, temp, temp);
2020 OMP_CLAUSE_DECL (c) = temp;
2021 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2022 gimple_omp_parallel_set_clauses (stmt, c);
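/* As a sketch of the effect above: for a combined construct such as

       #pragma omp parallel for collapse(2)

   with a non-constant iteration count, count is 2 + (collapse - 1) = 3,
   so three artificial _LOOPTEMP_ clauses are prepended to the parallel's
   clause list (for istart, iend and the count2 temporary).  */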
2027 ctx = new_omp_context (stmt, outer_ctx);
2028 if (taskreg_nesting_level > 1)
2029 ctx->is_nested = true;
2030 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2031 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2032 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2033 name = create_tmp_var_name (".omp_data_s");
2034 name = build_decl (gimple_location (stmt),
2035 TYPE_DECL, name, ctx->record_type);
2036 DECL_ARTIFICIAL (name) = 1;
2037 DECL_NAMELESS (name) = 1;
2038 TYPE_NAME (ctx->record_type) = name;
2039 create_omp_child_function (ctx, false);
2040 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2042 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2043 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2045 if (TYPE_FIELDS (ctx->record_type) == NULL)
2046 ctx->record_type = ctx->receiver_decl = NULL;
2047 else
2049 layout_type (ctx->record_type);
2050 fixup_child_record_type (ctx);
2054 /* Scan an OpenMP task directive. */
2056 static void
2057 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2059 omp_context *ctx;
2060 tree name, t;
2061 gimple stmt = gsi_stmt (*gsi);
2062 location_t loc = gimple_location (stmt);
2064 /* Ignore task directives with empty bodies. */
2065 if (optimize > 0
2066 && empty_body_p (gimple_omp_body (stmt)))
2068 gsi_replace (gsi, gimple_build_nop (), false);
2069 return;
2072 ctx = new_omp_context (stmt, outer_ctx);
2073 if (taskreg_nesting_level > 1)
2074 ctx->is_nested = true;
2075 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2076 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2077 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2078 name = create_tmp_var_name (".omp_data_s");
2079 name = build_decl (gimple_location (stmt),
2080 TYPE_DECL, name, ctx->record_type);
2081 DECL_ARTIFICIAL (name) = 1;
2082 DECL_NAMELESS (name) = 1;
2083 TYPE_NAME (ctx->record_type) = name;
2084 create_omp_child_function (ctx, false);
2085 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2087 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2089 if (ctx->srecord_type)
2091 name = create_tmp_var_name (".omp_data_a");
2092 name = build_decl (gimple_location (stmt),
2093 TYPE_DECL, name, ctx->srecord_type);
2094 DECL_ARTIFICIAL (name) = 1;
2095 DECL_NAMELESS (name) = 1;
2096 TYPE_NAME (ctx->srecord_type) = name;
2097 create_omp_child_function (ctx, true);
2100 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2102 if (TYPE_FIELDS (ctx->record_type) == NULL)
2104 ctx->record_type = ctx->receiver_decl = NULL;
2105 t = build_int_cst (long_integer_type_node, 0);
2106 gimple_omp_task_set_arg_size (stmt, t);
2107 t = build_int_cst (long_integer_type_node, 1);
2108 gimple_omp_task_set_arg_align (stmt, t);
2110 else
2112 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2113 /* Move VLA fields to the end. */
2114 p = &TYPE_FIELDS (ctx->record_type);
2115 while (*p)
2116 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2117 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2119 *q = *p;
2120 *p = TREE_CHAIN (*p);
2121 TREE_CHAIN (*q) = NULL_TREE;
2122 q = &TREE_CHAIN (*q);
2124 else
2125 p = &DECL_CHAIN (*p);
2126 *p = vla_fields;
2127 layout_type (ctx->record_type);
2128 fixup_child_record_type (ctx);
2129 if (ctx->srecord_type)
2130 layout_type (ctx->srecord_type);
2131 t = fold_convert_loc (loc, long_integer_type_node,
2132 TYPE_SIZE_UNIT (ctx->record_type));
2133 gimple_omp_task_set_arg_size (stmt, t);
2134 t = build_int_cst (long_integer_type_node,
2135 TYPE_ALIGN_UNIT (ctx->record_type));
2136 gimple_omp_task_set_arg_align (stmt, t);
2141 /* Scan an OpenMP loop directive. */
2143 static void
2144 scan_omp_for (gimple stmt, omp_context *outer_ctx)
2146 omp_context *ctx;
2147 size_t i;
2149 ctx = new_omp_context (stmt, outer_ctx);
2151 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2153 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2154 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2156 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2157 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2158 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2159 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2161 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2164 /* Scan an OpenMP sections directive. */
2166 static void
2167 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
2169 omp_context *ctx;
2171 ctx = new_omp_context (stmt, outer_ctx);
2172 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2173 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2176 /* Scan an OpenMP single directive. */
2178 static void
2179 scan_omp_single (gimple stmt, omp_context *outer_ctx)
2181 omp_context *ctx;
2182 tree name;
2184 ctx = new_omp_context (stmt, outer_ctx);
2185 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2186 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2187 name = create_tmp_var_name (".omp_copy_s");
2188 name = build_decl (gimple_location (stmt),
2189 TYPE_DECL, name, ctx->record_type);
2190 TYPE_NAME (ctx->record_type) = name;
2192 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2193 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2195 if (TYPE_FIELDS (ctx->record_type) == NULL)
2196 ctx->record_type = NULL;
2197 else
2198 layout_type (ctx->record_type);
2201 /* Scan an OpenMP target{, data, update} directive. */
2203 static void
2204 scan_omp_target (gimple stmt, omp_context *outer_ctx)
2206 omp_context *ctx;
2207 tree name;
2208 int kind = gimple_omp_target_kind (stmt);
2210 ctx = new_omp_context (stmt, outer_ctx);
2211 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2212 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2213 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2214 name = create_tmp_var_name (".omp_data_t");
2215 name = build_decl (gimple_location (stmt),
2216 TYPE_DECL, name, ctx->record_type);
2217 DECL_ARTIFICIAL (name) = 1;
2218 DECL_NAMELESS (name) = 1;
2219 TYPE_NAME (ctx->record_type) = name;
2220 if (kind == GF_OMP_TARGET_KIND_REGION)
2222 create_omp_child_function (ctx, false);
2223 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2226 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2227 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2229 if (TYPE_FIELDS (ctx->record_type) == NULL)
2230 ctx->record_type = ctx->receiver_decl = NULL;
2231 else
2233 TYPE_FIELDS (ctx->record_type)
2234 = nreverse (TYPE_FIELDS (ctx->record_type));
2235 #ifdef ENABLE_CHECKING
2236 tree field;
2237 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2238 for (field = TYPE_FIELDS (ctx->record_type);
2239 field;
2240 field = DECL_CHAIN (field))
2241 gcc_assert (DECL_ALIGN (field) == align);
2242 #endif
2243 layout_type (ctx->record_type);
2244 if (kind == GF_OMP_TARGET_KIND_REGION)
2245 fixup_child_record_type (ctx);
2249 /* Scan an OpenMP teams directive. */
2251 static void
2252 scan_omp_teams (gimple stmt, omp_context *outer_ctx)
2254 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2255 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2256 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2259 /* Check OpenMP nesting restrictions. */
2260 static bool
2261 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2263 if (ctx != NULL)
2265 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2266 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2268 error_at (gimple_location (stmt),
2269 "OpenMP constructs may not be nested inside simd region");
2270 return false;
2272 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2274 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2275 || (gimple_omp_for_kind (stmt)
2276 != GF_OMP_FOR_KIND_DISTRIBUTE))
2277 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2279 error_at (gimple_location (stmt),
2280 "only distribute or parallel constructs are allowed to "
2281 "be closely nested inside teams construct");
2282 return false;
2286 switch (gimple_code (stmt))
2288 case GIMPLE_OMP_FOR:
2289 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2290 return true;
2291 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2293 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2295 error_at (gimple_location (stmt),
2296 "distribute construct must be closely nested inside "
2297 "teams construct");
2298 return false;
2300 return true;
2302 /* FALLTHRU */
2303 case GIMPLE_CALL:
2304 if (is_gimple_call (stmt)
2305 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2306 == BUILT_IN_GOMP_CANCEL
2307 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2308 == BUILT_IN_GOMP_CANCELLATION_POINT))
2310 const char *bad = NULL;
2311 const char *kind = NULL;
2312 if (ctx == NULL)
2314 error_at (gimple_location (stmt), "orphaned %qs construct",
2315 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2316 == BUILT_IN_GOMP_CANCEL
2317 ? "#pragma omp cancel"
2318 : "#pragma omp cancellation point");
2319 return false;
2321 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2322 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2323 : 0)
2325 case 1:
2326 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2327 bad = "#pragma omp parallel";
2328 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2329 == BUILT_IN_GOMP_CANCEL
2330 && !integer_zerop (gimple_call_arg (stmt, 1)))
2331 ctx->cancellable = true;
2332 kind = "parallel";
2333 break;
2334 case 2:
2335 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2336 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2337 bad = "#pragma omp for";
2338 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2339 == BUILT_IN_GOMP_CANCEL
2340 && !integer_zerop (gimple_call_arg (stmt, 1)))
2342 ctx->cancellable = true;
2343 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2344 OMP_CLAUSE_NOWAIT))
2345 warning_at (gimple_location (stmt), 0,
2346 "%<#pragma omp cancel for%> inside "
2347 "%<nowait%> for construct");
2348 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2349 OMP_CLAUSE_ORDERED))
2350 warning_at (gimple_location (stmt), 0,
2351 "%<#pragma omp cancel for%> inside "
2352 "%<ordered%> for construct");
2354 kind = "for";
2355 break;
2356 case 4:
2357 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2358 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2359 bad = "#pragma omp sections";
2360 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2361 == BUILT_IN_GOMP_CANCEL
2362 && !integer_zerop (gimple_call_arg (stmt, 1)))
2364 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2366 ctx->cancellable = true;
2367 if (find_omp_clause (gimple_omp_sections_clauses
2368 (ctx->stmt),
2369 OMP_CLAUSE_NOWAIT))
2370 warning_at (gimple_location (stmt), 0,
2371 "%<#pragma omp cancel sections%> inside "
2372 "%<nowait%> sections construct");
2374 else
2376 gcc_assert (ctx->outer
2377 && gimple_code (ctx->outer->stmt)
2378 == GIMPLE_OMP_SECTIONS);
2379 ctx->outer->cancellable = true;
2380 if (find_omp_clause (gimple_omp_sections_clauses
2381 (ctx->outer->stmt),
2382 OMP_CLAUSE_NOWAIT))
2383 warning_at (gimple_location (stmt), 0,
2384 "%<#pragma omp cancel sections%> inside "
2385 "%<nowait%> sections construct");
2388 kind = "sections";
2389 break;
2390 case 8:
2391 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2392 bad = "#pragma omp task";
2393 else
2394 ctx->cancellable = true;
2395 kind = "taskgroup";
2396 break;
2397 default:
2398 error_at (gimple_location (stmt), "invalid arguments");
2399 return false;
2401 if (bad)
2403 error_at (gimple_location (stmt),
2404 "%<%s %s%> construct not closely nested inside of %qs",
2405 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2406 == BUILT_IN_GOMP_CANCEL
2407 ? "#pragma omp cancel"
2408 : "#pragma omp cancellation point", kind, bad);
2409 return false;
2412 /* FALLTHRU */
2413 case GIMPLE_OMP_SECTIONS:
2414 case GIMPLE_OMP_SINGLE:
2415 for (; ctx != NULL; ctx = ctx->outer)
2416 switch (gimple_code (ctx->stmt))
2418 case GIMPLE_OMP_FOR:
2419 case GIMPLE_OMP_SECTIONS:
2420 case GIMPLE_OMP_SINGLE:
2421 case GIMPLE_OMP_ORDERED:
2422 case GIMPLE_OMP_MASTER:
2423 case GIMPLE_OMP_TASK:
2424 case GIMPLE_OMP_CRITICAL:
2425 if (is_gimple_call (stmt))
2427 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2428 != BUILT_IN_GOMP_BARRIER)
2429 return true;
2430 error_at (gimple_location (stmt),
2431 "barrier region may not be closely nested inside "
2432 "of work-sharing, critical, ordered, master or "
2433 "explicit task region");
2434 return false;
2436 error_at (gimple_location (stmt),
2437 "work-sharing region may not be closely nested inside "
2438 "of work-sharing, critical, ordered, master or explicit "
2439 "task region");
2440 return false;
2441 case GIMPLE_OMP_PARALLEL:
2442 return true;
2443 default:
2444 break;
2446 break;
2447 case GIMPLE_OMP_MASTER:
2448 for (; ctx != NULL; ctx = ctx->outer)
2449 switch (gimple_code (ctx->stmt))
2451 case GIMPLE_OMP_FOR:
2452 case GIMPLE_OMP_SECTIONS:
2453 case GIMPLE_OMP_SINGLE:
2454 case GIMPLE_OMP_TASK:
2455 error_at (gimple_location (stmt),
2456 "master region may not be closely nested inside "
2457 "of work-sharing or explicit task region");
2458 return false;
2459 case GIMPLE_OMP_PARALLEL:
2460 return true;
2461 default:
2462 break;
2464 break;
2465 case GIMPLE_OMP_ORDERED:
2466 for (; ctx != NULL; ctx = ctx->outer)
2467 switch (gimple_code (ctx->stmt))
2469 case GIMPLE_OMP_CRITICAL:
2470 case GIMPLE_OMP_TASK:
2471 error_at (gimple_location (stmt),
2472 "ordered region may not be closely nested inside "
2473 "of critical or explicit task region");
2474 return false;
2475 case GIMPLE_OMP_FOR:
2476 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2477 OMP_CLAUSE_ORDERED) == NULL)
2479 error_at (gimple_location (stmt),
2480 "ordered region must be closely nested inside "
2481 "a loop region with an ordered clause");
2482 return false;
2484 return true;
2485 case GIMPLE_OMP_PARALLEL:
2486 error_at (gimple_location (stmt),
2487 "ordered region must be closely nested inside "
2488 "a loop region with an ordered clause");
2489 return false;
2490 default:
2491 break;
2493 break;
2494 case GIMPLE_OMP_CRITICAL:
2495 for (; ctx != NULL; ctx = ctx->outer)
2496 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
2497 && (gimple_omp_critical_name (stmt)
2498 == gimple_omp_critical_name (ctx->stmt)))
2500 error_at (gimple_location (stmt),
2501 "critical region may not be nested inside a critical "
2502 "region with the same name");
2503 return false;
2505 break;
2506 case GIMPLE_OMP_TEAMS:
2507 if (ctx == NULL
2508 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2509 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2511 error_at (gimple_location (stmt),
2512 "teams construct not closely nested inside of target "
2513 "region");
2514 return false;
2516 break;
2517 case GIMPLE_OMP_TARGET:
2518 for (; ctx != NULL; ctx = ctx->outer)
2519 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
2520 && gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_REGION)
2522 const char *name;
2523 switch (gimple_omp_target_kind (stmt))
2525 case GF_OMP_TARGET_KIND_REGION: name = "target"; break;
2526 case GF_OMP_TARGET_KIND_DATA: name = "target data"; break;
2527 case GF_OMP_TARGET_KIND_UPDATE: name = "target update"; break;
2528 default: gcc_unreachable ();
2530 warning_at (gimple_location (stmt), 0,
2531 "%s construct inside of target region", name);
2533 break;
2534 default:
2535 break;
2537 return true;
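/* For reference, the construct-kind encoding tested above in the first
   argument of GOMP_cancel / GOMP_cancellation_point calls is:
   1 = parallel, 2 = for, 4 = sections, 8 = taskgroup.  */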
2541 /* Helper function for scan_omp.
2543 Callback for walk_tree or operators in walk_gimple_stmt used to
2544 scan for OpenMP directives in TP. */
2546 static tree
2547 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2549 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2550 omp_context *ctx = (omp_context *) wi->info;
2551 tree t = *tp;
2553 switch (TREE_CODE (t))
2555 case VAR_DECL:
2556 case PARM_DECL:
2557 case LABEL_DECL:
2558 case RESULT_DECL:
2559 if (ctx)
2560 *tp = remap_decl (t, &ctx->cb);
2561 break;
2563 default:
2564 if (ctx && TYPE_P (t))
2565 *tp = remap_type (t, &ctx->cb);
2566 else if (!DECL_P (t))
2568 *walk_subtrees = 1;
2569 if (ctx)
2571 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2572 if (tem != TREE_TYPE (t))
2574 if (TREE_CODE (t) == INTEGER_CST)
2575 *tp = wide_int_to_tree (tem, t);
2576 else
2577 TREE_TYPE (t) = tem;
2581 break;
2584 return NULL_TREE;
2587 /* Return true if FNDECL is a setjmp or a longjmp. */
2589 static bool
2590 setjmp_or_longjmp_p (const_tree fndecl)
2592 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2593 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2594 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2595 return true;
2597 tree declname = DECL_NAME (fndecl);
2598 if (!declname)
2599 return false;
2600 const char *name = IDENTIFIER_POINTER (declname);
2601 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
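/* Note this recognizes both the setjmp/longjmp builtins and plain
   user-declared functions simply named "setjmp" or "longjmp".  */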
2605 /* Helper function for scan_omp.
2607 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2608 the current statement in GSI. */
2610 static tree
2611 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2612 struct walk_stmt_info *wi)
2614 gimple stmt = gsi_stmt (*gsi);
2615 omp_context *ctx = (omp_context *) wi->info;
2617 if (gimple_has_location (stmt))
2618 input_location = gimple_location (stmt);
2620 /* Check the OpenMP nesting restrictions. */
2621 bool remove = false;
2622 if (is_gimple_omp (stmt))
2623 remove = !check_omp_nesting_restrictions (stmt, ctx);
2624 else if (is_gimple_call (stmt))
2626 tree fndecl = gimple_call_fndecl (stmt);
2627 if (fndecl)
2629 if (setjmp_or_longjmp_p (fndecl)
2630 && ctx
2631 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2632 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2634 remove = true;
2635 error_at (gimple_location (stmt),
2636 "setjmp/longjmp inside simd construct");
2638 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2639 switch (DECL_FUNCTION_CODE (fndecl))
2641 case BUILT_IN_GOMP_BARRIER:
2642 case BUILT_IN_GOMP_CANCEL:
2643 case BUILT_IN_GOMP_CANCELLATION_POINT:
2644 case BUILT_IN_GOMP_TASKYIELD:
2645 case BUILT_IN_GOMP_TASKWAIT:
2646 case BUILT_IN_GOMP_TASKGROUP_START:
2647 case BUILT_IN_GOMP_TASKGROUP_END:
2648 remove = !check_omp_nesting_restrictions (stmt, ctx);
2649 break;
2650 default:
2651 break;
2655 if (remove)
2657 stmt = gimple_build_nop ();
2658 gsi_replace (gsi, stmt, false);
2661 *handled_ops_p = true;
2663 switch (gimple_code (stmt))
2665 case GIMPLE_OMP_PARALLEL:
2666 taskreg_nesting_level++;
2667 scan_omp_parallel (gsi, ctx);
2668 taskreg_nesting_level--;
2669 break;
2671 case GIMPLE_OMP_TASK:
2672 taskreg_nesting_level++;
2673 scan_omp_task (gsi, ctx);
2674 taskreg_nesting_level--;
2675 break;
2677 case GIMPLE_OMP_FOR:
2678 scan_omp_for (stmt, ctx);
2679 break;
2681 case GIMPLE_OMP_SECTIONS:
2682 scan_omp_sections (stmt, ctx);
2683 break;
2685 case GIMPLE_OMP_SINGLE:
2686 scan_omp_single (stmt, ctx);
2687 break;
2689 case GIMPLE_OMP_SECTION:
2690 case GIMPLE_OMP_MASTER:
2691 case GIMPLE_OMP_TASKGROUP:
2692 case GIMPLE_OMP_ORDERED:
2693 case GIMPLE_OMP_CRITICAL:
2694 ctx = new_omp_context (stmt, ctx);
2695 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2696 break;
2698 case GIMPLE_OMP_TARGET:
2699 scan_omp_target (stmt, ctx);
2700 break;
2702 case GIMPLE_OMP_TEAMS:
2703 scan_omp_teams (stmt, ctx);
2704 break;
2706 case GIMPLE_BIND:
2708 tree var;
2710 *handled_ops_p = false;
2711 if (ctx)
2712 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2713 insert_decl_map (&ctx->cb, var, var);
2715 break;
2716 default:
2717 *handled_ops_p = false;
2718 break;
2721 return NULL_TREE;
2725 /* Scan all the statements starting at the current statement. CTX
2726 contains context information about the OpenMP directives and
2727 clauses found during the scan. */
2729 static void
2730 scan_omp (gimple_seq *body_p, omp_context *ctx)
2732 location_t saved_location;
2733 struct walk_stmt_info wi;
2735 memset (&wi, 0, sizeof (wi));
2736 wi.info = ctx;
2737 wi.want_locations = true;
2739 saved_location = input_location;
2740 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2741 input_location = saved_location;
2744 /* Re-gimplification and code generation routines. */
2746 /* Build a call to GOMP_barrier. */
2748 static gimple
2749 build_omp_barrier (tree lhs)
2751 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2752 : BUILT_IN_GOMP_BARRIER);
2753 gimple g = gimple_build_call (fndecl, 0);
2754 if (lhs)
2755 gimple_call_set_lhs (g, lhs);
2756 return g;
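/* A sketch of the two shapes this emits:

       GOMP_barrier ();                       when LHS is NULL_TREE
       lhs = GOMP_barrier_cancel ();          when LHS is given, so the
                                              caller can test for cancellation.  */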
2759 /* If a context was created for STMT when it was scanned, return it. */
2761 static omp_context *
2762 maybe_lookup_ctx (gimple stmt)
2764 splay_tree_node n;
2765 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2766 return n ? (omp_context *) n->value : NULL;
2770 /* Find the mapping for DECL in CTX or the immediately enclosing
2771 context that has a mapping for DECL.
2773 If CTX is a nested parallel directive, we may have to use the decl
2774 mappings created in CTX's parent context. Suppose that we have the
2775 following parallel nesting (variable UIDs shown for clarity):
2777 iD.1562 = 0;
2778 #omp parallel shared(iD.1562) -> outer parallel
2779 iD.1562 = iD.1562 + 1;
2781 #omp parallel shared (iD.1562) -> inner parallel
2782 iD.1562 = iD.1562 - 1;
2784 Each parallel structure will create a distinct .omp_data_s structure
2785 for copying iD.1562 in/out of the directive:
2787 outer parallel .omp_data_s.1.i -> iD.1562
2788 inner parallel .omp_data_s.2.i -> iD.1562
2790 A shared variable mapping will produce a copy-out operation before
2791 the parallel directive and a copy-in operation after it. So, in
2792 this case we would have:
2794 iD.1562 = 0;
2795 .omp_data_o.1.i = iD.1562;
2796 #omp parallel shared(iD.1562) -> outer parallel
2797 .omp_data_i.1 = &.omp_data_o.1
2798 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2800 .omp_data_o.2.i = iD.1562; -> **
2801 #omp parallel shared(iD.1562) -> inner parallel
2802 .omp_data_i.2 = &.omp_data_o.2
2803 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2806 ** This is a problem. The symbol iD.1562 cannot be referenced
2807 inside the body of the outer parallel region. But since we are
2808 emitting this copy operation while expanding the inner parallel
2809 directive, we need to access the CTX structure of the outer
2810 parallel directive to get the correct mapping:
2812 .omp_data_o.2.i = .omp_data_i.1->i
2814 Since there may be other workshare or parallel directives enclosing
2815 the parallel directive, it may be necessary to walk up the context
2816 parent chain. This is not a problem in general because nested
2817 parallelism happens only rarely. */
2819 static tree
2820 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2822 tree t;
2823 omp_context *up;
2825 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2826 t = maybe_lookup_decl (decl, up);
2828 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2830 return t ? t : decl;
2834 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2835 in outer contexts. */
2837 static tree
2838 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2840 tree t = NULL;
2841 omp_context *up;
2843 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2844 t = maybe_lookup_decl (decl, up);
2846 return t ? t : decl;
2850 /* Construct the initialization value for reduction CLAUSE. */
2852 tree
2853 omp_reduction_init (tree clause, tree type)
2855 location_t loc = OMP_CLAUSE_LOCATION (clause);
2856 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2858 case PLUS_EXPR:
2859 case MINUS_EXPR:
2860 case BIT_IOR_EXPR:
2861 case BIT_XOR_EXPR:
2862 case TRUTH_OR_EXPR:
2863 case TRUTH_ORIF_EXPR:
2864 case TRUTH_XOR_EXPR:
2865 case NE_EXPR:
2866 return build_zero_cst (type);
2868 case MULT_EXPR:
2869 case TRUTH_AND_EXPR:
2870 case TRUTH_ANDIF_EXPR:
2871 case EQ_EXPR:
2872 return fold_convert_loc (loc, type, integer_one_node);
2874 case BIT_AND_EXPR:
2875 return fold_convert_loc (loc, type, integer_minus_one_node);
2877 case MAX_EXPR:
2878 if (SCALAR_FLOAT_TYPE_P (type))
2880 REAL_VALUE_TYPE max, min;
2881 if (HONOR_INFINITIES (TYPE_MODE (type)))
2883 real_inf (&max);
2884 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2886 else
2887 real_maxval (&min, 1, TYPE_MODE (type));
2888 return build_real (type, min);
2890 else
2892 gcc_assert (INTEGRAL_TYPE_P (type));
2893 return TYPE_MIN_VALUE (type);
2896 case MIN_EXPR:
2897 if (SCALAR_FLOAT_TYPE_P (type))
2899 REAL_VALUE_TYPE max;
2900 if (HONOR_INFINITIES (TYPE_MODE (type)))
2901 real_inf (&max);
2902 else
2903 real_maxval (&max, 0, TYPE_MODE (type));
2904 return build_real (type, max);
2906 else
2908 gcc_assert (INTEGRAL_TYPE_P (type));
2909 return TYPE_MAX_VALUE (type);
2912 default:
2913 gcc_unreachable ();
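/* For illustration, some of the initializers chosen above:

       reduction(+:x)    ->  0
       reduction(*:x)    ->  1
       reduction(&:x)    ->  ~0 (all bits set)
       reduction(max:x)  ->  INT_MIN for int; -inf for float when
                             infinities are honored.  */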
2917 /* Return alignment to be assumed for var in CLAUSE, which should be
2918 OMP_CLAUSE_ALIGNED. */
2920 static tree
2921 omp_clause_aligned_alignment (tree clause)
2923 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
2924 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
2926 /* Otherwise return an implementation-defined alignment. */
2927 unsigned int al = 1;
2928 enum machine_mode mode, vmode;
2929 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2930 if (vs)
2931 vs = 1 << floor_log2 (vs);
2932 static enum mode_class classes[]
2933 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
2934 for (int i = 0; i < 4; i += 2)
2935 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
2936 mode != VOIDmode;
2937 mode = GET_MODE_WIDER_MODE (mode))
2939 vmode = targetm.vectorize.preferred_simd_mode (mode);
2940 if (GET_MODE_CLASS (vmode) != classes[i + 1])
2941 continue;
2942 while (vs
2943 && GET_MODE_SIZE (vmode) < vs
2944 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
2945 vmode = GET_MODE_2XWIDER_MODE (vmode);
2947 tree type = lang_hooks.types.type_for_mode (mode, 1);
2948 if (type == NULL_TREE || TYPE_MODE (type) != mode)
2949 continue;
2950 type = build_vector_type (type, GET_MODE_SIZE (vmode)
2951 / GET_MODE_SIZE (mode));
2952 if (TYPE_MODE (type) != vmode)
2953 continue;
2954 if (TYPE_ALIGN_UNIT (type) > al)
2955 al = TYPE_ALIGN_UNIT (type);
2957 return build_int_cst (integer_type_node, al);
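/* In effect, aligned(p) without an explicit alignment resolves to the unit
   alignment of the widest preferred SIMD vector type found above; as a rough
   illustration (the exact value is target-dependent), that would be 16 or
   32 bytes on typical SSE or AVX targets.  */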
2960 /* Return maximum possible vectorization factor for the target. */
2962 static int
2963 omp_max_vf (void)
2965 if (!optimize
2966 || optimize_debug
2967 || !flag_tree_loop_optimize
2968 || (!flag_tree_loop_vectorize
2969 && (global_options_set.x_flag_tree_loop_vectorize
2970 || global_options_set.x_flag_tree_vectorize)))
2971 return 1;
2973 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2974 if (vs)
2976 vs = 1 << floor_log2 (vs);
2977 return vs;
2979 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2980 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2981 return GET_MODE_NUNITS (vqimode);
2982 return 1;
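/* Worked example on a hypothetical target: if autovectorize_vector_sizes
   returns the size mask 32 | 16 = 48, then 1 << floor_log2 (48) = 32, so
   omp_max_vf yields a maximum vectorization factor of 32.  */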
2985 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2986 privatization. */
2988 static bool
2989 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2990 tree &idx, tree &lane, tree &ivar, tree &lvar)
2992 if (max_vf == 0)
2994 max_vf = omp_max_vf ();
2995 if (max_vf > 1)
2997 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2998 OMP_CLAUSE_SAFELEN);
2999 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3000 max_vf = 1;
3001 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3002 max_vf) == -1)
3003 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3005 if (max_vf > 1)
3007 idx = create_tmp_var (unsigned_type_node, NULL);
3008 lane = create_tmp_var (unsigned_type_node, NULL);
3011 if (max_vf == 1)
3012 return false;
3014 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3015 tree avar = create_tmp_var_raw (atype, NULL);
3016 if (TREE_ADDRESSABLE (new_var))
3017 TREE_ADDRESSABLE (avar) = 1;
3018 DECL_ATTRIBUTES (avar)
3019 = tree_cons (get_identifier ("omp simd array"), NULL,
3020 DECL_ATTRIBUTES (avar));
3021 gimple_add_tmp_var (avar);
3022 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3023 NULL_TREE, NULL_TREE);
3024 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3025 NULL_TREE, NULL_TREE);
3026 if (DECL_P (new_var))
3028 SET_DECL_VALUE_EXPR (new_var, lvar);
3029 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3031 return true;
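/* A sketch of the privatization this sets up for a scalar X in a simd
   loop (array name hypothetical):

       T X.simdarr[max_vf];               marked with the "omp simd array"
                                          attribute
       ivar = X.simdarr[idx]              the per-iteration copy used in
                                          the loop body
       lvar = X.simdarr[lane]             the "current lane" copy, installed
                                          as X's DECL_VALUE_EXPR.  */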
3034 /* Helper function of lower_rec_input_clauses. For a reference used
3035 in a simd reduction, add an underlying variable for it to reference. */
3037 static void
3038 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3040 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3041 if (TREE_CONSTANT (z))
3043 const char *name = NULL;
3044 if (DECL_NAME (new_vard))
3045 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3047 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3048 gimple_add_tmp_var (z);
3049 TREE_ADDRESSABLE (z) = 1;
3050 z = build_fold_addr_expr_loc (loc, z);
3051 gimplify_assign (new_vard, z, ilist);
3055 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3056 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3057 private variables. Initialization statements go in ILIST, while calls
3058 to destructors go in DLIST. */
3060 static void
3061 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3062 omp_context *ctx, struct omp_for_data *fd)
3064 tree c, dtor, copyin_seq, x, ptr;
3065 bool copyin_by_ref = false;
3066 bool lastprivate_firstprivate = false;
3067 bool reduction_omp_orig_ref = false;
3068 int pass;
3069 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3070 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3071 int max_vf = 0;
3072 tree lane = NULL_TREE, idx = NULL_TREE;
3073 tree ivar = NULL_TREE, lvar = NULL_TREE;
3074 gimple_seq llist[2] = { NULL, NULL };
3076 copyin_seq = NULL;
3078 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3079 with data sharing clauses referencing variable sized vars. That
3080 is unnecessarily hard to support and very unlikely to result in
3081 vectorized code anyway. */
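/* E.g. a loop like

       #pragma omp simd private(vla)

   where vla is a variable length array ends up with max_vf = 1 and hence
   an implied safelen(1), i.e. it is effectively left unvectorized.  */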
3082 if (is_simd)
3083 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3084 switch (OMP_CLAUSE_CODE (c))
3086 case OMP_CLAUSE_LINEAR:
3087 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3088 max_vf = 1;
3089 /* FALLTHRU */
3090 case OMP_CLAUSE_REDUCTION:
3091 case OMP_CLAUSE_PRIVATE:
3092 case OMP_CLAUSE_FIRSTPRIVATE:
3093 case OMP_CLAUSE_LASTPRIVATE:
3094 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3095 max_vf = 1;
3096 break;
3097 default:
3098 continue;
3101 /* Do all the fixed sized types in the first pass, and the variable sized
3102 types in the second pass. This makes sure that the scalar arguments to
3103 the variable sized types are processed before we use them in the
3104 variable sized operations. */
3105 for (pass = 0; pass < 2; ++pass)
3107 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3109 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3110 tree var, new_var;
3111 bool by_ref;
3112 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3114 switch (c_kind)
3116 case OMP_CLAUSE_PRIVATE:
3117 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3118 continue;
3119 break;
3120 case OMP_CLAUSE_SHARED:
3121 /* Ignore shared directives in teams construct. */
3122 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3123 continue;
3124 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3126 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3127 continue;
3129 case OMP_CLAUSE_FIRSTPRIVATE:
3130 case OMP_CLAUSE_COPYIN:
3131 case OMP_CLAUSE_LINEAR:
3132 break;
3133 case OMP_CLAUSE_REDUCTION:
3134 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3135 reduction_omp_orig_ref = true;
3136 break;
3137 case OMP_CLAUSE__LOOPTEMP_:
3138 /* Handle _looptemp_ clauses only on parallel. */
3139 if (fd)
3140 continue;
3141 break;
3142 case OMP_CLAUSE_LASTPRIVATE:
3143 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3145 lastprivate_firstprivate = true;
3146 if (pass != 0)
3147 continue;
3149 /* Even without corresponding firstprivate, if
3150 decl is Fortran allocatable, it needs outer var
3151 reference. */
3152 else if (pass == 0
3153 && lang_hooks.decls.omp_private_outer_ref
3154 (OMP_CLAUSE_DECL (c)))
3155 lastprivate_firstprivate = true;
3156 break;
3157 case OMP_CLAUSE_ALIGNED:
3158 if (pass == 0)
3159 continue;
3160 var = OMP_CLAUSE_DECL (c);
3161 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3162 && !is_global_var (var))
3164 new_var = maybe_lookup_decl (var, ctx);
3165 if (new_var == NULL_TREE)
3166 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3167 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3168 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3169 omp_clause_aligned_alignment (c));
3170 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3171 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3172 gimplify_and_add (x, ilist);
3174 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3175 && is_global_var (var))
3177 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3178 new_var = lookup_decl (var, ctx);
3179 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3180 t = build_fold_addr_expr_loc (clause_loc, t);
3181 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3182 t = build_call_expr_loc (clause_loc, t2, 2, t,
3183 omp_clause_aligned_alignment (c));
3184 t = fold_convert_loc (clause_loc, ptype, t);
3185 x = create_tmp_var (ptype, NULL);
3186 t = build2 (MODIFY_EXPR, ptype, x, t);
3187 gimplify_and_add (t, ilist);
3188 t = build_simple_mem_ref_loc (clause_loc, x);
3189 SET_DECL_VALUE_EXPR (new_var, t);
3190 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3192 continue;
3193 default:
3194 continue;
3197 new_var = var = OMP_CLAUSE_DECL (c);
3198 if (c_kind != OMP_CLAUSE_COPYIN)
3199 new_var = lookup_decl (var, ctx);
3201 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3203 if (pass != 0)
3204 continue;
3206 else if (is_variable_sized (var))
3208 /* For variable sized types, we need to allocate the
3209 actual storage here. Call alloca and store the
3210 result in the pointer decl that we created elsewhere. */
3211 if (pass == 0)
3212 continue;
3214 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3216 gimple stmt;
3217 tree tmp, atmp;
3219 ptr = DECL_VALUE_EXPR (new_var);
3220 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3221 ptr = TREE_OPERAND (ptr, 0);
3222 gcc_assert (DECL_P (ptr));
3223 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3225 /* void *tmp = __builtin_alloca */
3226 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3227 stmt = gimple_build_call (atmp, 1, x);
3228 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3229 gimple_add_tmp_var (tmp);
3230 gimple_call_set_lhs (stmt, tmp);
3232 gimple_seq_add_stmt (ilist, stmt);
3234 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3235 gimplify_assign (ptr, x, ilist);
3238 else if (is_reference (var))
3240 /* For references that are being privatized for Fortran,
3241 allocate new backing storage for the new pointer
3242 variable. This allows us to avoid rewriting all the
3243 code that expects a pointer into code that expects
3244 a direct variable. */
3245 if (pass == 0)
3246 continue;
3248 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3249 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3251 x = build_receiver_ref (var, false, ctx);
3252 x = build_fold_addr_expr_loc (clause_loc, x);
3254 else if (TREE_CONSTANT (x))
3256 /* For a reduction in a SIMD loop, defer adding the
3257 initialization of the reference, because if we decide
3258 to use a SIMD array for it, the initialization could cause
3259 an expansion ICE. */
3260 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3261 x = NULL_TREE;
3262 else
3264 const char *name = NULL;
3265 if (DECL_NAME (var))
3266 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3268 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3269 name);
3270 gimple_add_tmp_var (x);
3271 TREE_ADDRESSABLE (x) = 1;
3272 x = build_fold_addr_expr_loc (clause_loc, x);
3275 else
3277 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3278 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3281 if (x)
3283 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3284 gimplify_assign (new_var, x, ilist);
3287 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3289 else if (c_kind == OMP_CLAUSE_REDUCTION
3290 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3292 if (pass == 0)
3293 continue;
3295 else if (pass != 0)
3296 continue;
3298 switch (OMP_CLAUSE_CODE (c))
3300 case OMP_CLAUSE_SHARED:
3301 /* Ignore shared directives in teams construct. */
3302 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3303 continue;
3304 /* Shared global vars are just accessed directly. */
3305 if (is_global_var (new_var))
3306 break;
3307 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3308 needs to be delayed until after fixup_child_record_type so
3309 that we get the correct type during the dereference. */
3310 by_ref = use_pointer_for_field (var, ctx);
3311 x = build_receiver_ref (var, by_ref, ctx);
3312 SET_DECL_VALUE_EXPR (new_var, x);
3313 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3315 /* ??? If VAR is not passed by reference, and the variable
3316 hasn't been initialized yet, then we'll get a warning for
3317 the store into the omp_data_s structure. Ideally, we'd be
3318 able to notice this and not store anything at all, but
3319 we're generating code too early. Suppress the warning. */
3320 if (!by_ref)
3321 TREE_NO_WARNING (var) = 1;
3322 break;
3324 case OMP_CLAUSE_LASTPRIVATE:
3325 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3326 break;
3327 /* FALLTHRU */
3329 case OMP_CLAUSE_PRIVATE:
3330 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3331 x = build_outer_var_ref (var, ctx);
3332 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3334 if (is_task_ctx (ctx))
3335 x = build_receiver_ref (var, false, ctx);
3336 else
3337 x = build_outer_var_ref (var, ctx);
3339 else
3340 x = NULL;
3341 do_private:
3342 tree nx;
3343 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3344 if (is_simd)
3346 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3347 if ((TREE_ADDRESSABLE (new_var) || nx || y
3348 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3349 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3350 idx, lane, ivar, lvar))
3352 if (nx)
3353 x = lang_hooks.decls.omp_clause_default_ctor
3354 (c, unshare_expr (ivar), x);
3355 if (nx && x)
3356 gimplify_and_add (x, &llist[0]);
3357 if (y)
3359 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3360 if (y)
3362 gimple_seq tseq = NULL;
3364 dtor = y;
3365 gimplify_stmt (&dtor, &tseq);
3366 gimple_seq_add_seq (&llist[1], tseq);
3369 break;
3372 if (nx)
3373 gimplify_and_add (nx, ilist);
3374 /* FALLTHRU */
3376 do_dtor:
3377 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3378 if (x)
3380 gimple_seq tseq = NULL;
3382 dtor = x;
3383 gimplify_stmt (&dtor, &tseq);
3384 gimple_seq_add_seq (dlist, tseq);
3386 break;
3388 case OMP_CLAUSE_LINEAR:
3389 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3390 goto do_firstprivate;
3391 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3392 x = NULL;
3393 else
3394 x = build_outer_var_ref (var, ctx);
3395 goto do_private;
3397 case OMP_CLAUSE_FIRSTPRIVATE:
3398 if (is_task_ctx (ctx))
3400 if (is_reference (var) || is_variable_sized (var))
3401 goto do_dtor;
3402 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3403 ctx))
3404 || use_pointer_for_field (var, NULL))
3406 x = build_receiver_ref (var, false, ctx);
3407 SET_DECL_VALUE_EXPR (new_var, x);
3408 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3409 goto do_dtor;
3412 do_firstprivate:
3413 x = build_outer_var_ref (var, ctx);
3414 if (is_simd)
3416 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3417 && gimple_omp_for_combined_into_p (ctx->stmt))
3419 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3420 tree stept = TREE_TYPE (t);
3421 tree ct = find_omp_clause (clauses,
3422 OMP_CLAUSE__LOOPTEMP_);
3423 gcc_assert (ct);
3424 tree l = OMP_CLAUSE_DECL (ct);
3425 tree n1 = fd->loop.n1;
3426 tree step = fd->loop.step;
3427 tree itype = TREE_TYPE (l);
3428 if (POINTER_TYPE_P (itype))
3429 itype = signed_type_for (itype);
3430 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3431 if (TYPE_UNSIGNED (itype)
3432 && fd->loop.cond_code == GT_EXPR)
3433 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3434 fold_build1 (NEGATE_EXPR, itype, l),
3435 fold_build1 (NEGATE_EXPR,
3436 itype, step));
3437 else
3438 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3439 t = fold_build2 (MULT_EXPR, stept,
3440 fold_convert (stept, l), t);
3442 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3444 x = lang_hooks.decls.omp_clause_linear_ctor
3445 (c, new_var, x, t);
3446 gimplify_and_add (x, ilist);
3447 goto do_dtor;
3450 if (POINTER_TYPE_P (TREE_TYPE (x)))
3451 x = fold_build2 (POINTER_PLUS_EXPR,
3452 TREE_TYPE (x), x, t);
3453 else
3454 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3457 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3458 || TREE_ADDRESSABLE (new_var))
3459 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3460 idx, lane, ivar, lvar))
3462 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3464 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3465 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3466 gimplify_and_add (x, ilist);
3467 gimple_stmt_iterator gsi
3468 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3469 gimple g
3470 = gimple_build_assign (unshare_expr (lvar), iv);
3471 gsi_insert_before_without_update (&gsi, g,
3472 GSI_SAME_STMT);
3473 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3474 enum tree_code code = PLUS_EXPR;
3475 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3476 code = POINTER_PLUS_EXPR;
3477 g = gimple_build_assign_with_ops (code, iv, iv, t);
3478 gsi_insert_before_without_update (&gsi, g,
3479 GSI_SAME_STMT);
3480 break;
3482 x = lang_hooks.decls.omp_clause_copy_ctor
3483 (c, unshare_expr (ivar), x);
3484 gimplify_and_add (x, &llist[0]);
3485 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3486 if (x)
3488 gimple_seq tseq = NULL;
3490 dtor = x;
3491 gimplify_stmt (&dtor, &tseq);
3492 gimple_seq_add_seq (&llist[1], tseq);
3494 break;
3497 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3498 gimplify_and_add (x, ilist);
3499 goto do_dtor;
3501 case OMP_CLAUSE__LOOPTEMP_:
3502 gcc_assert (is_parallel_ctx (ctx));
3503 x = build_outer_var_ref (var, ctx);
3504 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3505 gimplify_and_add (x, ilist);
3506 break;
3508 case OMP_CLAUSE_COPYIN:
3509 by_ref = use_pointer_for_field (var, NULL);
3510 x = build_receiver_ref (var, by_ref, ctx);
3511 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3512 append_to_statement_list (x, &copyin_seq);
3513 copyin_by_ref |= by_ref;
3514 break;
3516 case OMP_CLAUSE_REDUCTION:
3517 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3519 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3520 gimple tseq;
3521 x = build_outer_var_ref (var, ctx);
3523 if (is_reference (var)
3524 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3525 TREE_TYPE (x)))
3526 x = build_fold_addr_expr_loc (clause_loc, x);
3527 SET_DECL_VALUE_EXPR (placeholder, x);
3528 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3529 tree new_vard = new_var;
3530 if (is_reference (var))
3532 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3533 new_vard = TREE_OPERAND (new_var, 0);
3534 gcc_assert (DECL_P (new_vard));
3536 if (is_simd
3537 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3538 idx, lane, ivar, lvar))
3540 if (new_vard == new_var)
3542 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3543 SET_DECL_VALUE_EXPR (new_var, ivar);
3545 else
3547 SET_DECL_VALUE_EXPR (new_vard,
3548 build_fold_addr_expr (ivar));
3549 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3551 x = lang_hooks.decls.omp_clause_default_ctor
3552 (c, unshare_expr (ivar),
3553 build_outer_var_ref (var, ctx));
3554 if (x)
3555 gimplify_and_add (x, &llist[0]);
3556 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3558 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3559 lower_omp (&tseq, ctx);
3560 gimple_seq_add_seq (&llist[0], tseq);
3562 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3563 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3564 lower_omp (&tseq, ctx);
3565 gimple_seq_add_seq (&llist[1], tseq);
3566 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3567 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3568 if (new_vard == new_var)
3569 SET_DECL_VALUE_EXPR (new_var, lvar);
3570 else
3571 SET_DECL_VALUE_EXPR (new_vard,
3572 build_fold_addr_expr (lvar));
3573 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3574 if (x)
3576 tseq = NULL;
3577 dtor = x;
3578 gimplify_stmt (&dtor, &tseq);
3579 gimple_seq_add_seq (&llist[1], tseq);
3581 break;
3583 /* If this is a reference to a constant-size reduction var
3584 with a placeholder, we have not emitted the initializer
3585 for it because that is undesirable if SIMD arrays are used.
3586 But if they aren't used, we need to emit the deferred
3587 initialization now. */
3588 else if (is_reference (var) && is_simd)
3589 handle_simd_reference (clause_loc, new_vard, ilist);
3590 x = lang_hooks.decls.omp_clause_default_ctor
3591 (c, unshare_expr (new_var),
3592 build_outer_var_ref (var, ctx));
3593 if (x)
3594 gimplify_and_add (x, ilist);
3595 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3597 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3598 lower_omp (&tseq, ctx);
3599 gimple_seq_add_seq (ilist, tseq);
3601 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3602 if (is_simd)
3604 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3605 lower_omp (&tseq, ctx);
3606 gimple_seq_add_seq (dlist, tseq);
3607 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3609 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3610 goto do_dtor;
3612 else
3614 x = omp_reduction_init (c, TREE_TYPE (new_var));
3615 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3616 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3618 /* reduction(-:var) sums up the partial results, so it
3619 acts identically to reduction(+:var). */
3620 if (code == MINUS_EXPR)
3621 code = PLUS_EXPR;
3623 tree new_vard = new_var;
3624 if (is_simd && is_reference (var))
3626 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3627 new_vard = TREE_OPERAND (new_var, 0);
3628 gcc_assert (DECL_P (new_vard));
3630 if (is_simd
3631 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3632 idx, lane, ivar, lvar))
3634 tree ref = build_outer_var_ref (var, ctx);
3636 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3638 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3639 ref = build_outer_var_ref (var, ctx);
3640 gimplify_assign (ref, x, &llist[1]);
3642 if (new_vard != new_var)
3644 SET_DECL_VALUE_EXPR (new_vard,
3645 build_fold_addr_expr (lvar));
3646 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3649 else
3651 if (is_reference (var) && is_simd)
3652 handle_simd_reference (clause_loc, new_vard, ilist);
3653 gimplify_assign (new_var, x, ilist);
3654 if (is_simd)
3656 tree ref = build_outer_var_ref (var, ctx);
3658 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3659 ref = build_outer_var_ref (var, ctx);
3660 gimplify_assign (ref, x, dlist);
3664 break;
3666 default:
3667 gcc_unreachable ();
3672 if (lane)
3674 tree uid = create_tmp_var (ptr_type_node, "simduid");
3675 /* We don't want uninitialized warnings on simduid; it is always
3676 uninitialized, but we use it only for its DECL_UID, not for its value. */
3677 TREE_NO_WARNING (uid) = 1;
3678 gimple g
3679 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3680 gimple_call_set_lhs (g, lane);
3681 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3682 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3683 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3684 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3685 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3686 gimple_omp_for_set_clauses (ctx->stmt, c);
3687 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3688 build_int_cst (unsigned_type_node, 0),
3689 NULL_TREE);
3690 gimple_seq_add_stmt (ilist, g);
3691 for (int i = 0; i < 2; i++)
3692 if (llist[i])
3694 tree vf = create_tmp_var (unsigned_type_node, NULL);
3695 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3696 gimple_call_set_lhs (g, vf);
3697 gimple_seq *seq = i == 0 ? ilist : dlist;
3698 gimple_seq_add_stmt (seq, g);
3699 tree t = build_int_cst (unsigned_type_node, 0);
3700 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3701 gimple_seq_add_stmt (seq, g);
3702 tree body = create_artificial_label (UNKNOWN_LOCATION);
3703 tree header = create_artificial_label (UNKNOWN_LOCATION);
3704 tree end = create_artificial_label (UNKNOWN_LOCATION);
3705 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3706 gimple_seq_add_stmt (seq, gimple_build_label (body));
3707 gimple_seq_add_seq (seq, llist[i]);
3708 t = build_int_cst (unsigned_type_node, 1);
3709 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3710 gimple_seq_add_stmt (seq, g);
3711 gimple_seq_add_stmt (seq, gimple_build_label (header));
3712 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3713 gimple_seq_add_stmt (seq, g);
3714 gimple_seq_add_stmt (seq, gimple_build_label (end));
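/* The sequence built above corresponds to this pseudo-GIMPLE loop over
   the simd array elements:

       vf = GOMP_SIMD_VF (simduid);
       idx = 0;
       goto header;
     body:
       <llist[i] statements>;
       idx = idx + 1;
     header:
       if (idx < vf) goto body; else goto end;
     end:                                                          */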
3718 /* The copyin sequence is not to be executed by the main thread, since
3719 that would result in self-copies. The self-copy may be harmless for
3720 scalars, but it certainly is not for C++ operator=. */
3721 if (copyin_seq)
3723 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3724 0);
3725 x = build2 (NE_EXPR, boolean_type_node, x,
3726 build_int_cst (TREE_TYPE (x), 0));
3727 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3728 gimplify_and_add (x, ilist);
3731 /* If any copyin variable is passed by reference, we must ensure the
3732 master thread doesn't modify it before it is copied over in all
3733 threads. Similarly for variables in both firstprivate and
3734 lastprivate clauses we need to ensure the lastprivate copying
3735 happens after firstprivate copying in all threads. And similarly
3736 for UDRs if initializer expression refers to omp_orig. */
3737 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3739 /* Don't add any barrier for #pragma omp simd or
3740 #pragma omp distribute. */
3741 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3742 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3743 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3746 /* If max_vf is non-zero, then we can use only a vectorization factor
3747 up to the max_vf we chose. So stick it into the safelen clause. */
3748 if (max_vf)
3750 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3751 OMP_CLAUSE_SAFELEN);
3752 if (c == NULL_TREE
3753 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
3754 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3755 max_vf) == 1))
3757 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3758 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3759 max_vf);
3760 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3761 gimple_omp_for_set_clauses (ctx->stmt, c);
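/* For example, if the user wrote safelen(64) but we privatized with
   max_vf = 16, a new safelen(16) clause is prepended here; since it sits
   at the head of the clause list, later lookups see safelen(16), keeping
   the vectorization factor within the simd arrays' bounds.  */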
3767 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3768 both parallel and workshare constructs. PREDICATE may be NULL if it's
3769 always true. */
3771 static void
3772 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3773 omp_context *ctx)
3775 tree x, c, label = NULL, orig_clauses = clauses;
3776 bool par_clauses = false;
3777 tree simduid = NULL, lastlane = NULL;
3779 /* Early exit if there are no lastprivate or linear clauses. */
3780 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3781 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3782 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3783 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3784 break;
3785 if (clauses == NULL)
3787 /* If this was a workshare construct, see if it had been combined
3788 with its parallel. In that case, look for the clauses on the
3789 parallel statement itself. */
3790 if (is_parallel_ctx (ctx))
3791 return;
3793 ctx = ctx->outer;
3794 if (ctx == NULL || !is_parallel_ctx (ctx))
3795 return;
3797 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3798 OMP_CLAUSE_LASTPRIVATE);
3799 if (clauses == NULL)
3800 return;
3801 par_clauses = true;
3804 if (predicate)
3806 gimple stmt;
3807 tree label_true, arm1, arm2;
3809 label = create_artificial_label (UNKNOWN_LOCATION);
3810 label_true = create_artificial_label (UNKNOWN_LOCATION);
3811 arm1 = TREE_OPERAND (predicate, 0);
3812 arm2 = TREE_OPERAND (predicate, 1);
3813 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3814 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3815 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3816 label_true, label);
3817 gimple_seq_add_stmt (stmt_list, stmt);
3818 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3821 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3822 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3824 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3825 if (simduid)
3826 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3829 for (c = clauses; c ;)
3831 tree var, new_var;
3832 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3834 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3835 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3836 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3838 var = OMP_CLAUSE_DECL (c);
3839 new_var = lookup_decl (var, ctx);
3841 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3843 tree val = DECL_VALUE_EXPR (new_var);
3844 if (TREE_CODE (val) == ARRAY_REF
3845 && VAR_P (TREE_OPERAND (val, 0))
3846 && lookup_attribute ("omp simd array",
3847 DECL_ATTRIBUTES (TREE_OPERAND (val,
3848 0))))
3850 if (lastlane == NULL)
3852 lastlane = create_tmp_var (unsigned_type_node, NULL);
3853 gimple g
3854 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3855 2, simduid,
3856 TREE_OPERAND (val, 1));
3857 gimple_call_set_lhs (g, lastlane);
3858 gimple_seq_add_stmt (stmt_list, g);
3860 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3861 TREE_OPERAND (val, 0), lastlane,
3862 NULL_TREE, NULL_TREE);
3866 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3867 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
3869 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
3870 gimple_seq_add_seq (stmt_list,
3871 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
3872 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
3874 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3875 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
3877 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
3878 gimple_seq_add_seq (stmt_list,
3879 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
3880 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
3883 x = build_outer_var_ref (var, ctx);
3884 if (is_reference (var))
3885 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3886 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
3887 gimplify_and_add (x, stmt_list);
3889 c = OMP_CLAUSE_CHAIN (c);
3890 if (c == NULL && !par_clauses)
3892 /* If this was a workshare clause, see if it had been combined
3893 with its parallel. In that case, continue looking for the
3894 clauses also on the parallel statement itself. */
3895 if (is_parallel_ctx (ctx))
3896 break;
3898 ctx = ctx->outer;
3899 if (ctx == NULL || !is_parallel_ctx (ctx))
3900 break;
3902 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3903 OMP_CLAUSE_LASTPRIVATE);
3904 par_clauses = true;
3908 if (label)
3909 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
3913 /* Generate code to implement the REDUCTION clauses. */
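/* Illustrative note (a sketch of typical output, not verbatim): for a
   single  reduction(+:s)  clause the count == 1 path below emits an
   atomic update, conceptually
     #pragma omp atomic
     s = s + s.private;
   whereas several clauses, array reductions or user-defined reductions
   are merged inside a GOMP_atomic_start ()/GOMP_atomic_end () pair.  */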
3915 static void
3916 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
3918 gimple_seq sub_seq = NULL;
3919 gimple stmt;
3920 tree x, c;
3921 int count = 0;
3923 /* SIMD reductions are handled in lower_rec_input_clauses. */
3924 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3925 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3926 return;
3928 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3929 update in that case, otherwise use a lock. */
3930 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
3931 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
3933 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3935 /* Never use OMP_ATOMIC for array reductions or UDRs. */
3936 count = -1;
3937 break;
3939 count++;
3942 if (count == 0)
3943 return;
3945 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3947 tree var, ref, new_var;
3948 enum tree_code code;
3949 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3951 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
3952 continue;
3954 var = OMP_CLAUSE_DECL (c);
3955 new_var = lookup_decl (var, ctx);
3956 if (is_reference (var))
3957 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3958 ref = build_outer_var_ref (var, ctx);
3959 code = OMP_CLAUSE_REDUCTION_CODE (c);
3961 /* reduction(-:var) sums up the partial results, so it acts
3962 identically to reduction(+:var). */
3963 if (code == MINUS_EXPR)
3964 code = PLUS_EXPR;
3966 if (count == 1)
3968 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
3970 addr = save_expr (addr);
3971 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
3972 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
3973 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
3974 gimplify_and_add (x, stmt_seqp);
3975 return;
3978 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3980 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3982 if (is_reference (var)
3983 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3984 TREE_TYPE (ref)))
3985 ref = build_fold_addr_expr_loc (clause_loc, ref);
3986 SET_DECL_VALUE_EXPR (placeholder, ref);
3987 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3988 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
3989 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3990 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3991 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3993 else
3995 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3996 ref = build_outer_var_ref (var, ctx);
3997 gimplify_assign (ref, x, &sub_seq);
4001 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4002 0);
4003 gimple_seq_add_stmt (stmt_seqp, stmt);
4005 gimple_seq_add_seq (stmt_seqp, sub_seq);
4007 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4008 0);
4009 gimple_seq_add_stmt (stmt_seqp, stmt);
4013 /* Generate code to implement the COPYPRIVATE clauses. */
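/* Roughly: the thread that executed the single region stores VAR (or
   &VAR, when passed by reference) into the copyout record via the
   sender ref (SLIST), and afterwards every thread loads the value back
   out through its receiver ref (RLIST).  */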
4015 static void
4016 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4017 omp_context *ctx)
4019 tree c;
4021 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4023 tree var, new_var, ref, x;
4024 bool by_ref;
4025 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4027 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4028 continue;
4030 var = OMP_CLAUSE_DECL (c);
4031 by_ref = use_pointer_for_field (var, NULL);
4033 ref = build_sender_ref (var, ctx);
4034 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4035 if (by_ref)
4037 x = build_fold_addr_expr_loc (clause_loc, new_var);
4038 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4040 gimplify_assign (ref, x, slist);
4042 ref = build_receiver_ref (var, false, ctx);
4043 if (by_ref)
4045 ref = fold_convert_loc (clause_loc,
4046 build_pointer_type (TREE_TYPE (new_var)),
4047 ref);
4048 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4050 if (is_reference (var))
4052 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4053 ref = build_simple_mem_ref_loc (clause_loc, ref);
4054 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4056 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4057 gimplify_and_add (x, rlist);
4062 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4063 and REDUCTION from the sender (aka parent) side. */
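/* In the loop below, "do_in" assignments fill the sender record (the
   .omp_data_o block) before the region runs, going to ILIST, while
   "do_out" assignments copy results back out afterwards, going to
   OLIST.  */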
4065 static void
4066 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4067 omp_context *ctx)
4069 tree c;
4071 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4073 tree val, ref, x, var;
4074 bool by_ref, do_in = false, do_out = false;
4075 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4077 switch (OMP_CLAUSE_CODE (c))
4079 case OMP_CLAUSE_PRIVATE:
4080 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4081 break;
4082 continue;
4083 case OMP_CLAUSE_FIRSTPRIVATE:
4084 case OMP_CLAUSE_COPYIN:
4085 case OMP_CLAUSE_LASTPRIVATE:
4086 case OMP_CLAUSE_REDUCTION:
4087 case OMP_CLAUSE__LOOPTEMP_:
4088 break;
4089 default:
4090 continue;
4093 val = OMP_CLAUSE_DECL (c);
4094 var = lookup_decl_in_outer_ctx (val, ctx);
4096 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4097 && is_global_var (var))
4098 continue;
4099 if (is_variable_sized (val))
4100 continue;
4101 by_ref = use_pointer_for_field (val, NULL);
4103 switch (OMP_CLAUSE_CODE (c))
4105 case OMP_CLAUSE_PRIVATE:
4106 case OMP_CLAUSE_FIRSTPRIVATE:
4107 case OMP_CLAUSE_COPYIN:
4108 case OMP_CLAUSE__LOOPTEMP_:
4109 do_in = true;
4110 break;
4112 case OMP_CLAUSE_LASTPRIVATE:
4113 if (by_ref || is_reference (val))
4115 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4116 continue;
4117 do_in = true;
4119 else
4121 do_out = true;
4122 if (lang_hooks.decls.omp_private_outer_ref (val))
4123 do_in = true;
4125 break;
4127 case OMP_CLAUSE_REDUCTION:
4128 do_in = true;
4129 do_out = !(by_ref || is_reference (val));
4130 break;
4132 default:
4133 gcc_unreachable ();
4136 if (do_in)
4138 ref = build_sender_ref (val, ctx);
4139 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4140 gimplify_assign (ref, x, ilist);
4141 if (is_task_ctx (ctx))
4142 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4145 if (do_out)
4147 ref = build_sender_ref (val, ctx);
4148 gimplify_assign (var, ref, olist);
4153 /* Generate code to implement SHARED from the sender (aka parent)
4154 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4155 list things that got automatically shared. */
4157 static void
4158 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4160 tree var, ovar, nvar, f, x, record_type;
4162 if (ctx->record_type == NULL)
4163 return;
4165 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4166 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4168 ovar = DECL_ABSTRACT_ORIGIN (f);
4169 nvar = maybe_lookup_decl (ovar, ctx);
4170 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4171 continue;
4173 /* If CTX is a nested parallel directive, find the immediately
4174 enclosing parallel or workshare construct that contains a
4175 mapping for OVAR. */
4176 var = lookup_decl_in_outer_ctx (ovar, ctx);
4178 if (use_pointer_for_field (ovar, ctx))
4180 x = build_sender_ref (ovar, ctx);
4181 var = build_fold_addr_expr (var);
4182 gimplify_assign (x, var, ilist);
4184 else
4186 x = build_sender_ref (ovar, ctx);
4187 gimplify_assign (x, var, ilist);
4189 if (!TREE_READONLY (var)
4190 /* We don't need to receive a new reference to a result
4191 or parm decl. In fact we may not store to it as we will
4192 invalidate any pending RSO (return slot optimization) and generate wrong gimple
4193 during inlining. */
4194 && !((TREE_CODE (var) == RESULT_DECL
4195 || TREE_CODE (var) == PARM_DECL)
4196 && DECL_BY_REFERENCE (var)))
4198 x = build_sender_ref (ovar, ctx);
4199 gimplify_assign (var, x, olist);
4206 /* A convenience function to build an empty GIMPLE_COND with just the
4207 condition. */
4209 static gimple
4210 gimple_build_cond_empty (tree cond)
4212 enum tree_code pred_code;
4213 tree lhs, rhs;
4215 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4216 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4220 /* Build the function calls to GOMP_parallel_start etc. to actually
4221 generate the parallel operation. REGION is the parallel region
4222 being expanded. BB is the block where the code is to be inserted.
4223 WS_ARGS will be set if this is a call to a combined parallel+workshare
4224 construct; it contains the list of additional arguments needed by
4225 the workshare construct. */
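/* Illustrative shape of the emitted call (names are made up): for
     #pragma omp parallel num_threads (4)
   the arguments assembled below produce roughly
     GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 4, 0);
   i.e. child function, shared-data record, thread count and flags,
   with any WS_ARGS spliced in before the flags.  */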
4227 static void
4228 expand_parallel_call (struct omp_region *region, basic_block bb,
4229 gimple entry_stmt, vec<tree, va_gc> *ws_args)
4231 tree t, t1, t2, val, cond, c, clauses, flags;
4232 gimple_stmt_iterator gsi;
4233 gimple stmt;
4234 enum built_in_function start_ix;
4235 int start_ix2;
4236 location_t clause_loc;
4237 vec<tree, va_gc> *args;
4239 clauses = gimple_omp_parallel_clauses (entry_stmt);
4241 /* Determine what flavor of GOMP_parallel we will be
4242 emitting. */
4243 start_ix = BUILT_IN_GOMP_PARALLEL;
4244 if (is_combined_parallel (region))
4246 switch (region->inner->type)
4248 case GIMPLE_OMP_FOR:
4249 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4250 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4251 + (region->inner->sched_kind
4252 == OMP_CLAUSE_SCHEDULE_RUNTIME
4253 ? 3 : region->inner->sched_kind));
4254 start_ix = (enum built_in_function)start_ix2;
4255 break;
4256 case GIMPLE_OMP_SECTIONS:
4257 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4258 break;
4259 default:
4260 gcc_unreachable ();
4264 /* By default, the value of NUM_THREADS is zero (selected at run time)
4265 and there is no conditional. */
4266 cond = NULL_TREE;
4267 val = build_int_cst (unsigned_type_node, 0);
4268 flags = build_int_cst (unsigned_type_node, 0);
4270 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4271 if (c)
4272 cond = OMP_CLAUSE_IF_EXPR (c);
4274 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4275 if (c)
4277 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4278 clause_loc = OMP_CLAUSE_LOCATION (c);
4280 else
4281 clause_loc = gimple_location (entry_stmt);
4283 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4284 if (c)
4285 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4287 /* Ensure 'val' is of the correct type. */
4288 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4290 /* If we found the clause 'if (cond)', build either
4291 (cond != 0) or (cond ? val : 1u). */
4292 if (cond)
4294 cond = gimple_boolify (cond);
4296 if (integer_zerop (val))
4297 val = fold_build2_loc (clause_loc,
4298 EQ_EXPR, unsigned_type_node, cond,
4299 build_int_cst (TREE_TYPE (cond), 0));
4300 else
4302 basic_block cond_bb, then_bb, else_bb;
4303 edge e, e_then, e_else;
4304 tree tmp_then, tmp_else, tmp_join, tmp_var;
4306 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4307 if (gimple_in_ssa_p (cfun))
4309 tmp_then = make_ssa_name (tmp_var, NULL);
4310 tmp_else = make_ssa_name (tmp_var, NULL);
4311 tmp_join = make_ssa_name (tmp_var, NULL);
4313 else
4315 tmp_then = tmp_var;
4316 tmp_else = tmp_var;
4317 tmp_join = tmp_var;
4320 e = split_block (bb, NULL);
4321 cond_bb = e->src;
4322 bb = e->dest;
4323 remove_edge (e);
4325 then_bb = create_empty_bb (cond_bb);
4326 else_bb = create_empty_bb (then_bb);
4327 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4328 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4330 stmt = gimple_build_cond_empty (cond);
4331 gsi = gsi_start_bb (cond_bb);
4332 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4334 gsi = gsi_start_bb (then_bb);
4335 stmt = gimple_build_assign (tmp_then, val);
4336 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4338 gsi = gsi_start_bb (else_bb);
4339 stmt = gimple_build_assign
4340 (tmp_else, build_int_cst (unsigned_type_node, 1));
4341 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4343 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4344 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4345 add_bb_to_loop (then_bb, cond_bb->loop_father);
4346 add_bb_to_loop (else_bb, cond_bb->loop_father);
4347 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4348 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4350 if (gimple_in_ssa_p (cfun))
4352 gimple phi = create_phi_node (tmp_join, bb);
4353 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4354 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4357 val = tmp_join;
4360 gsi = gsi_start_bb (bb);
4361 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4362 false, GSI_CONTINUE_LINKING);
4365 gsi = gsi_last_bb (bb);
4366 t = gimple_omp_parallel_data_arg (entry_stmt);
4367 if (t == NULL)
4368 t1 = null_pointer_node;
4369 else
4370 t1 = build_fold_addr_expr (t);
4371 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4373 vec_alloc (args, 4 + vec_safe_length (ws_args));
4374 args->quick_push (t2);
4375 args->quick_push (t1);
4376 args->quick_push (val);
4377 if (ws_args)
4378 args->splice (*ws_args);
4379 args->quick_push (flags);
4381 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4382 builtin_decl_explicit (start_ix), args);
4384 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4385 false, GSI_CONTINUE_LINKING);
4389 /* Build the function call to GOMP_task to actually
4390 generate the task operation. BB is the block where the code is to be inserted. */
4392 static void
4393 expand_task_call (basic_block bb, gimple entry_stmt)
4395 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4396 gimple_stmt_iterator gsi;
4397 location_t loc = gimple_location (entry_stmt);
4399 clauses = gimple_omp_task_clauses (entry_stmt);
4401 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4402 if (c)
4403 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4404 else
4405 cond = boolean_true_node;
4407 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4408 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4409 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4410 flags = build_int_cst (unsigned_type_node,
4411 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4413 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4414 if (c)
4416 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4417 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4418 build_int_cst (unsigned_type_node, 2),
4419 build_int_cst (unsigned_type_node, 0));
4420 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
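/* Per the computation above, FLAGS thus encodes: bit 0 = untied,
   bit 1 = final, bit 2 = mergeable, bit 3 = depend.  */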
4422 if (depend)
4423 depend = OMP_CLAUSE_DECL (depend);
4424 else
4425 depend = build_int_cst (ptr_type_node, 0);
4427 gsi = gsi_last_bb (bb);
4428 t = gimple_omp_task_data_arg (entry_stmt);
4429 if (t == NULL)
4430 t2 = null_pointer_node;
4431 else
4432 t2 = build_fold_addr_expr_loc (loc, t);
4433 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4434 t = gimple_omp_task_copy_fn (entry_stmt);
4435 if (t == NULL)
4436 t3 = null_pointer_node;
4437 else
4438 t3 = build_fold_addr_expr_loc (loc, t);
4440 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4441 8, t1, t2, t3,
4442 gimple_omp_task_arg_size (entry_stmt),
4443 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4444 depend);
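/* The call built above is thus roughly (illustrative)
     GOMP_task (child_fn, &data, copy_fn, arg_size, arg_align,
                if_cond, flags, depend);
   where copy_fn and depend may be null pointers.  */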
4446 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4447 false, GSI_CONTINUE_LINKING);
4451 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4452 catch handler and return it. This prevents programs from violating the
4453 structured block semantics with throws. */
4455 static gimple_seq
4456 maybe_catch_exception (gimple_seq body)
4458 gimple g;
4459 tree decl;
4461 if (!flag_exceptions)
4462 return body;
4464 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4465 decl = lang_hooks.eh_protect_cleanup_actions ();
4466 else
4467 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4469 g = gimple_build_eh_must_not_throw (decl);
4470 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4471 GIMPLE_TRY_CATCH);
4473 return gimple_seq_alloc_with_stmt (g);
4476 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
4478 static tree
4479 vec2chain (vec<tree, va_gc> *v)
4481 tree chain = NULL_TREE, t;
4482 unsigned ix;
4484 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4486 DECL_CHAIN (t) = chain;
4487 chain = t;
4490 return chain;
4494 /* Remove barriers in REGION->EXIT's block. Note that this is only
4495 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4496 is an implicit barrier, the barrier that a workshare inside the
4497 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
4498 can now be removed. */
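/* Illustrative example:
     #pragma omp parallel
     {
       #pragma omp for
       ...
     }
   The for's implicit barrier is immediately followed by the implicit
   barrier at the end of the parallel, so the former is redundant and
   its GIMPLE_OMP_RETURN can be marked nowait (modulo the task caveat
   below).  */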
4500 static void
4501 remove_exit_barrier (struct omp_region *region)
4503 gimple_stmt_iterator gsi;
4504 basic_block exit_bb;
4505 edge_iterator ei;
4506 edge e;
4507 gimple stmt;
4508 int any_addressable_vars = -1;
4510 exit_bb = region->exit;
4512 /* If the parallel region doesn't return, we don't have a REGION->EXIT
4513 block at all. */
4514 if (! exit_bb)
4515 return;
4517 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4518 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4519 statements that can appear in between are extremely limited -- no
4520 memory operations at all. Here, we allow nothing at all, so the
4521 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4522 gsi = gsi_last_bb (exit_bb);
4523 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4524 gsi_prev (&gsi);
4525 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4526 return;
4528 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4530 gsi = gsi_last_bb (e->src);
4531 if (gsi_end_p (gsi))
4532 continue;
4533 stmt = gsi_stmt (gsi);
4534 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4535 && !gimple_omp_return_nowait_p (stmt))
4537 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4538 in many cases. If there could be tasks queued, the barrier
4539 might be needed to let the tasks run before some local
4540 variable of the parallel that the task uses as shared
4541 runs out of scope. The task can be spawned either
4542 from within the current function (this would be easy to check)
4543 or from some function it calls and gets passed an address
4544 of such a variable. */
4545 if (any_addressable_vars < 0)
4547 gimple parallel_stmt = last_stmt (region->entry);
4548 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4549 tree local_decls, block, decl;
4550 unsigned ix;
4552 any_addressable_vars = 0;
4553 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4554 if (TREE_ADDRESSABLE (decl))
4556 any_addressable_vars = 1;
4557 break;
4559 for (block = gimple_block (stmt);
4560 !any_addressable_vars
4561 && block
4562 && TREE_CODE (block) == BLOCK;
4563 block = BLOCK_SUPERCONTEXT (block))
4565 for (local_decls = BLOCK_VARS (block);
4566 local_decls;
4567 local_decls = DECL_CHAIN (local_decls))
4568 if (TREE_ADDRESSABLE (local_decls))
4570 any_addressable_vars = 1;
4571 break;
4573 if (block == gimple_block (parallel_stmt))
4574 break;
4577 if (!any_addressable_vars)
4578 gimple_omp_return_set_nowait (stmt);
4583 static void
4584 remove_exit_barriers (struct omp_region *region)
4586 if (region->type == GIMPLE_OMP_PARALLEL)
4587 remove_exit_barrier (region);
4589 if (region->inner)
4591 region = region->inner;
4592 remove_exit_barriers (region);
4593 while (region->next)
4595 region = region->next;
4596 remove_exit_barriers (region);
4601 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4602 calls. These can't be declared as const functions, but
4603 within one parallel body they are constant, so they can be
4604 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4605 which are declared const. Similarly for a task body, except
4606 that in an untied task omp_get_thread_num () can change at any task
4607 scheduling point. */
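/* Sketch of the transformation (illustrative): a call such as
     tid = omp_get_thread_num ();
   inside a parallel body is redirected to the const builtin
     tid = __builtin_omp_get_thread_num ();
   so later passes may CSE repeated calls within the region.  */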
4609 static void
4610 optimize_omp_library_calls (gimple entry_stmt)
4612 basic_block bb;
4613 gimple_stmt_iterator gsi;
4614 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4615 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4616 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4617 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4618 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4619 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4620 OMP_CLAUSE_UNTIED) != NULL);
4622 FOR_EACH_BB_FN (bb, cfun)
4623 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4625 gimple call = gsi_stmt (gsi);
4626 tree decl;
4628 if (is_gimple_call (call)
4629 && (decl = gimple_call_fndecl (call))
4630 && DECL_EXTERNAL (decl)
4631 && TREE_PUBLIC (decl)
4632 && DECL_INITIAL (decl) == NULL)
4634 tree built_in;
4636 if (DECL_NAME (decl) == thr_num_id)
4638 /* In #pragma omp task untied omp_get_thread_num () can change
4639 during the execution of the task region. */
4640 if (untied_task)
4641 continue;
4642 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4644 else if (DECL_NAME (decl) == num_thr_id)
4645 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4646 else
4647 continue;
4649 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4650 || gimple_call_num_args (call) != 0)
4651 continue;
4653 if (flag_exceptions && !TREE_NOTHROW (decl))
4654 continue;
4656 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4657 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4658 TREE_TYPE (TREE_TYPE (built_in))))
4659 continue;
4661 gimple_call_set_fndecl (call, built_in);
4666 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4667 regimplified. */
4669 static tree
4670 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4672 tree t = *tp;
4674 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4675 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4676 return t;
4678 if (TREE_CODE (t) == ADDR_EXPR)
4679 recompute_tree_invariant_for_addr_expr (t);
4681 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4682 return NULL_TREE;
4685 /* Prepend a TO = FROM assignment before *GSI_P. */
4687 static void
4688 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4690 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4691 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4692 true, GSI_SAME_STMT);
4693 gimple stmt = gimple_build_assign (to, from);
4694 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4695 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4696 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4698 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4699 gimple_regimplify_operands (stmt, &gsi);
4703 /* Expand the OpenMP parallel or task directive starting at REGION. */
4705 static void
4706 expand_omp_taskreg (struct omp_region *region)
4708 basic_block entry_bb, exit_bb, new_bb;
4709 struct function *child_cfun;
4710 tree child_fn, block, t;
4711 gimple_stmt_iterator gsi;
4712 gimple entry_stmt, stmt;
4713 edge e;
4714 vec<tree, va_gc> *ws_args;
4716 entry_stmt = last_stmt (region->entry);
4717 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4718 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4720 entry_bb = region->entry;
4721 exit_bb = region->exit;
4723 if (is_combined_parallel (region))
4724 ws_args = region->ws_args;
4725 else
4726 ws_args = NULL;
4728 if (child_cfun->cfg)
4730 /* Due to inlining, it may happen that we have already outlined
4731 the region, in which case all we need to do is make the
4732 sub-graph unreachable and emit the parallel call. */
4733 edge entry_succ_e, exit_succ_e;
4735 entry_succ_e = single_succ_edge (entry_bb);
4737 gsi = gsi_last_bb (entry_bb);
4738 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4739 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4740 gsi_remove (&gsi, true);
4742 new_bb = entry_bb;
4743 if (exit_bb)
4745 exit_succ_e = single_succ_edge (exit_bb);
4746 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4748 remove_edge_and_dominated_blocks (entry_succ_e);
4750 else
4752 unsigned srcidx, dstidx, num;
4754 /* If the parallel region needs data sent from the parent
4755 function, then the very first statement (except possible
4756 tree profile counter updates) of the parallel body
4757 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4758 &.OMP_DATA_O is passed as an argument to the child function,
4759 we need to replace it with the argument as seen by the child
4760 function.
4762 In most cases, this will end up being the identity assignment
4763 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4764 a function call that has been inlined, the original PARM_DECL
4765 .OMP_DATA_I may have been converted into a different local
4766 variable, in which case we need to keep the assignment. */
4767 if (gimple_omp_taskreg_data_arg (entry_stmt))
4769 basic_block entry_succ_bb = single_succ (entry_bb);
4770 tree arg, narg;
4771 gimple parcopy_stmt = NULL;
4773 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4775 gimple stmt;
4777 gcc_assert (!gsi_end_p (gsi));
4778 stmt = gsi_stmt (gsi);
4779 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4780 continue;
4782 if (gimple_num_ops (stmt) == 2)
4784 tree arg = gimple_assign_rhs1 (stmt);
4786 /* We're ignoring the subcode because we're
4787 effectively doing a STRIP_NOPS. */
4789 if (TREE_CODE (arg) == ADDR_EXPR
4790 && TREE_OPERAND (arg, 0)
4791 == gimple_omp_taskreg_data_arg (entry_stmt))
4793 parcopy_stmt = stmt;
4794 break;
4799 gcc_assert (parcopy_stmt != NULL);
4800 arg = DECL_ARGUMENTS (child_fn);
4802 if (!gimple_in_ssa_p (cfun))
4804 if (gimple_assign_lhs (parcopy_stmt) == arg)
4805 gsi_remove (&gsi, true);
4806 else
4808 /* ?? Is setting the subcode really necessary ?? */
4809 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4810 gimple_assign_set_rhs1 (parcopy_stmt, arg);
4813 else
4815 /* If we are in ssa form, we must load the value from the default
4816 definition of the argument. That should not be defined now,
4817 since the argument is not used uninitialized. */
4818 gcc_assert (ssa_default_def (cfun, arg) == NULL);
4819 narg = make_ssa_name (arg, gimple_build_nop ());
4820 set_ssa_default_def (cfun, arg, narg);
4821 /* ?? Is setting the subcode really necessary ?? */
4822 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
4823 gimple_assign_set_rhs1 (parcopy_stmt, narg);
4824 update_stmt (parcopy_stmt);
4828 /* Declare local variables needed in CHILD_CFUN. */
4829 block = DECL_INITIAL (child_fn);
4830 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
4831 /* The gimplifier could record temporaries in parallel/task block
4832 rather than in the containing function's local_decls chain,
4833 which would mean cgraph missed finalizing them. Do it now. */
4834 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
4835 if (TREE_CODE (t) == VAR_DECL
4836 && TREE_STATIC (t)
4837 && !DECL_EXTERNAL (t))
4838 varpool_finalize_decl (t);
4839 DECL_SAVED_TREE (child_fn) = NULL;
4840 /* We'll create a CFG for child_fn, so no gimple body is needed. */
4841 gimple_set_body (child_fn, NULL);
4842 TREE_USED (block) = 1;
4844 /* Reset DECL_CONTEXT on function arguments. */
4845 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
4846 DECL_CONTEXT (t) = child_fn;
4848 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
4849 so that it can be moved to the child function. */
4850 gsi = gsi_last_bb (entry_bb);
4851 stmt = gsi_stmt (gsi);
4852 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
4853 || gimple_code (stmt) == GIMPLE_OMP_TASK));
4854 gsi_remove (&gsi, true);
4855 e = split_block (entry_bb, stmt);
4856 entry_bb = e->dest;
4857 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4859 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
4860 if (exit_bb)
4862 gsi = gsi_last_bb (exit_bb);
4863 gcc_assert (!gsi_end_p (gsi)
4864 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4865 stmt = gimple_build_return (NULL);
4866 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4867 gsi_remove (&gsi, true);
4870 /* Move the parallel region into CHILD_CFUN. */
4872 if (gimple_in_ssa_p (cfun))
4874 init_tree_ssa (child_cfun);
4875 init_ssa_operands (child_cfun);
4876 child_cfun->gimple_df->in_ssa_p = true;
4877 block = NULL_TREE;
4879 else
4880 block = gimple_block (entry_stmt);
4882 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
4883 if (exit_bb)
4884 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
4885 /* When the OMP expansion process cannot guarantee an up-to-date
4886 loop tree, arrange for the child function to fix up loops. */
4887 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
4888 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
4890 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
4891 num = vec_safe_length (child_cfun->local_decls);
4892 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
4894 t = (*child_cfun->local_decls)[srcidx];
4895 if (DECL_CONTEXT (t) == cfun->decl)
4896 continue;
4897 if (srcidx != dstidx)
4898 (*child_cfun->local_decls)[dstidx] = t;
4899 dstidx++;
4901 if (dstidx != num)
4902 vec_safe_truncate (child_cfun->local_decls, dstidx);
4904 /* Inform the callgraph about the new function. */
4905 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
4906 cgraph_add_new_function (child_fn, true);
4908 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4909 fixed in a following pass. */
4910 push_cfun (child_cfun);
4911 if (optimize)
4912 optimize_omp_library_calls (entry_stmt);
4913 rebuild_cgraph_edges ();
4915 /* Some EH regions might become dead, see PR34608. If
4916 pass_cleanup_cfg isn't the first pass to happen with the
4917 new child, these dead EH edges might cause problems.
4918 Clean them up now. */
4919 if (flag_exceptions)
4921 basic_block bb;
4922 bool changed = false;
4924 FOR_EACH_BB_FN (bb, cfun)
4925 changed |= gimple_purge_dead_eh_edges (bb);
4926 if (changed)
4927 cleanup_tree_cfg ();
4929 if (gimple_in_ssa_p (cfun))
4930 update_ssa (TODO_update_ssa);
4931 pop_cfun ();
4934 /* Emit a library call to launch the children threads. */
4935 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
4936 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4937 else
4938 expand_task_call (new_bb, entry_stmt);
4939 if (gimple_in_ssa_p (cfun))
4940 update_ssa (TODO_update_ssa_only_virtuals);
4944 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4945 of the combined collapse > 1 loop constructs, generate code like:
4946 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4947 if (cond3 is <)
4948 adj = STEP3 - 1;
4949 else
4950 adj = STEP3 + 1;
4951 count3 = (adj + N32 - N31) / STEP3;
4952 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4953 if (cond2 is <)
4954 adj = STEP2 - 1;
4955 else
4956 adj = STEP2 + 1;
4957 count2 = (adj + N22 - N21) / STEP2;
4958 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4959 if (cond1 is <)
4960 adj = STEP1 - 1;
4961 else
4962 adj = STEP1 + 1;
4963 count1 = (adj + N12 - N11) / STEP1;
4964 count = count1 * count2 * count3;
4965 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4966 count = 0;
4967 and set ZERO_ITER_BB to that bb. If this isn't the outermost
4968 of the combined loop constructs, just initialize the COUNTS array
4969 from the _looptemp_ clauses. */
4971 /* NOTE: It *could* be better to moosh all of the BBs together,
4972 creating one larger BB with all the computation and the unexpected
4973 jump at the end. I.e.
4975 bool zero3, zero2, zero1, zero;
4977 zero3 = N32 c3 N31;
4978 count3 = (N32 - N31) /[cl] STEP3;
4979 zero2 = N22 c2 N21;
4980 count2 = (N22 - N21) /[cl] STEP2;
4981 zero1 = N12 c1 N11;
4982 count1 = (N12 - N11) /[cl] STEP1;
4983 zero = zero3 || zero2 || zero1;
4984 count = count1 * count2 * count3;
4985 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4987 After all, we expect zero to be false, and thus we expect to have to
4988 evaluate all of the comparison expressions, so short-circuiting
4989 oughtn't be a win. Since the condition isn't protecting a
4990 denominator, we're not concerned about divide-by-zero, so we can
4991 fully evaluate count even if a numerator turned out to be wrong.
4993 It seems like putting this all together would create much better
4994 scheduling opportunities, and less pressure on the chip's branch
4995 predictor. */
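/* Worked example (illustrative) of the computation above: for
     collapse(2)  on  for (i = 0; i < 4; i++) for (j = 0; j < 5; j++)
   both conditions are <, so adj = STEP - 1 = 0 and
     count1 = (0 + 4 - 0) / 1 = 4,
     count2 = (0 + 5 - 0) / 1 = 5,
     count = count1 * count2 = 20.  */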
4997 static void
4998 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4999 basic_block &entry_bb, tree *counts,
5000 basic_block &zero_iter_bb, int &first_zero_iter,
5001 basic_block &l2_dom_bb)
5003 tree t, type = TREE_TYPE (fd->loop.v);
5004 gimple stmt;
5005 edge e, ne;
5006 int i;
5008 /* Collapsed loops need work for expansion into SSA form. */
5009 gcc_assert (!gimple_in_ssa_p (cfun));
5011 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5012 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5014 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
5015 isn't supposed to be handled, as the inner loop doesn't
5016 use it. */
5017 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5018 OMP_CLAUSE__LOOPTEMP_);
5019 gcc_assert (innerc);
5020 for (i = 0; i < fd->collapse; i++)
5022 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5023 OMP_CLAUSE__LOOPTEMP_);
5024 gcc_assert (innerc);
5025 if (i)
5026 counts[i] = OMP_CLAUSE_DECL (innerc);
5027 else
5028 counts[0] = NULL_TREE;
5030 return;
5033 for (i = 0; i < fd->collapse; i++)
5035 tree itype = TREE_TYPE (fd->loops[i].v);
5037 if (SSA_VAR_P (fd->loop.n2)
5038 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5039 fold_convert (itype, fd->loops[i].n1),
5040 fold_convert (itype, fd->loops[i].n2)))
5041 == NULL_TREE || !integer_onep (t)))
5043 tree n1, n2;
5044 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5045 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5046 true, GSI_SAME_STMT);
5047 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5048 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5049 true, GSI_SAME_STMT);
5050 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5051 NULL_TREE, NULL_TREE);
5052 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5053 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5054 expand_omp_regimplify_p, NULL, NULL)
5055 || walk_tree (gimple_cond_rhs_ptr (stmt),
5056 expand_omp_regimplify_p, NULL, NULL))
5058 *gsi = gsi_for_stmt (stmt);
5059 gimple_regimplify_operands (stmt, gsi);
5061 e = split_block (entry_bb, stmt);
5062 if (zero_iter_bb == NULL)
5064 first_zero_iter = i;
5065 zero_iter_bb = create_empty_bb (entry_bb);
5066 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5067 *gsi = gsi_after_labels (zero_iter_bb);
5068 stmt = gimple_build_assign (fd->loop.n2,
5069 build_zero_cst (type));
5070 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5071 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5072 entry_bb);
5074 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5075 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5076 e->flags = EDGE_TRUE_VALUE;
5077 e->probability = REG_BR_PROB_BASE - ne->probability;
5078 if (l2_dom_bb == NULL)
5079 l2_dom_bb = entry_bb;
5080 entry_bb = e->dest;
5081 *gsi = gsi_last_bb (entry_bb);
5084 if (POINTER_TYPE_P (itype))
5085 itype = signed_type_for (itype);
5086 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5087 ? -1 : 1));
5088 t = fold_build2 (PLUS_EXPR, itype,
5089 fold_convert (itype, fd->loops[i].step), t);
5090 t = fold_build2 (PLUS_EXPR, itype, t,
5091 fold_convert (itype, fd->loops[i].n2));
5092 t = fold_build2 (MINUS_EXPR, itype, t,
5093 fold_convert (itype, fd->loops[i].n1));
5094 /* ?? We could probably use CEIL_DIV_EXPR instead of
5095 TRUNC_DIV_EXPR and adjust by hand. Unless we can't
5096 generate the same code in the end because generically we
5097 don't know that the values involved must be negative for
5098 GT?? */
5099 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5100 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5101 fold_build1 (NEGATE_EXPR, itype, t),
5102 fold_build1 (NEGATE_EXPR, itype,
5103 fold_convert (itype,
5104 fd->loops[i].step)));
5105 else
5106 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5107 fold_convert (itype, fd->loops[i].step));
5108 t = fold_convert (type, t);
5109 if (TREE_CODE (t) == INTEGER_CST)
5110 counts[i] = t;
5111 else
5113 counts[i] = create_tmp_reg (type, ".count");
5114 expand_omp_build_assign (gsi, counts[i], t);
5116 if (SSA_VAR_P (fd->loop.n2))
5118 if (i == 0)
5119 t = counts[0];
5120 else
5121 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5122 expand_omp_build_assign (gsi, fd->loop.n2, t);
5128 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5129 T = V;
5130 V3 = N31 + (T % count3) * STEP3;
5131 T = T / count3;
5132 V2 = N21 + (T % count2) * STEP2;
5133 T = T / count2;
5134 V1 = N11 + T * STEP1;
5135 if this loop doesn't have an inner loop construct combined with it.
5136 If it does have an inner loop construct combined with it and the
5137 iteration count isn't a known constant, store values from the COUNTS
5138 array into its _looptemp_ temporaries instead. */
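/* Worked example (illustrative): with count3 = 5, count2 = 4 and
   logical iteration T = V = 17, the code above computes
     V3 = N31 + (17 % 5) * STEP3 = N31 + 2 * STEP3;  T = 17 / 5 = 3;
     V2 = N21 + (3 % 4) * STEP2 = N21 + 3 * STEP2;   T = 3 / 4 = 0;
     V1 = N11 + 0 * STEP1 = N11.  */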
5140 static void
5141 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5142 tree *counts, gimple inner_stmt, tree startvar)
5144 int i;
5145 if (gimple_omp_for_combined_p (fd->for_stmt))
5147 /* If fd->loop.n2 is constant, then no propagation of the counts
5148 is needed, they are constant. */
5149 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5150 return;
5152 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5153 ? gimple_omp_parallel_clauses (inner_stmt)
5154 : gimple_omp_for_clauses (inner_stmt);
5155 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
5156 isn't supposed to be handled, as the inner loop doesn't
5157 use it. */
5158 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5159 gcc_assert (innerc);
5160 for (i = 0; i < fd->collapse; i++)
5162 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5163 OMP_CLAUSE__LOOPTEMP_);
5164 gcc_assert (innerc);
5165 if (i)
5167 tree tem = OMP_CLAUSE_DECL (innerc);
5168 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5169 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5170 false, GSI_CONTINUE_LINKING);
5171 gimple stmt = gimple_build_assign (tem, t);
5172 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5175 return;
5178 tree type = TREE_TYPE (fd->loop.v);
5179 tree tem = create_tmp_reg (type, ".tem");
5180 gimple stmt = gimple_build_assign (tem, startvar);
5181 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5183 for (i = fd->collapse - 1; i >= 0; i--)
5185 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5186 itype = vtype;
5187 if (POINTER_TYPE_P (vtype))
5188 itype = signed_type_for (vtype);
5189 if (i != 0)
5190 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5191 else
5192 t = tem;
5193 t = fold_convert (itype, t);
5194 t = fold_build2 (MULT_EXPR, itype, t,
5195 fold_convert (itype, fd->loops[i].step));
5196 if (POINTER_TYPE_P (vtype))
5197 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5198 else
5199 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5200 t = force_gimple_operand_gsi (gsi, t,
5201 DECL_P (fd->loops[i].v)
5202 && TREE_ADDRESSABLE (fd->loops[i].v),
5203 NULL_TREE, false,
5204 GSI_CONTINUE_LINKING);
5205 stmt = gimple_build_assign (fd->loops[i].v, t);
5206 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5207 if (i != 0)
5209 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5210 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5211 false, GSI_CONTINUE_LINKING);
5212 stmt = gimple_build_assign (tem, t);
5213 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5219 /* Helper function for expand_omp_for_*. Generate code like:
5220 L10:
5221 V3 += STEP3;
5222 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5223 L11:
5224 V3 = N31;
5225 V2 += STEP2;
5226 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5227 L12:
5228 V2 = N21;
5229 V1 += STEP1;
5230 goto BODY_BB; */
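/* In other words: this emits the "odometer" update for collapsed
   loops -- bump the innermost variable, and whenever it runs past its
   bound, reset it to its lower bound and carry into the next outer
   variable.  */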
5232 static basic_block
5233 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5234 basic_block body_bb)
5236 basic_block last_bb, bb, collapse_bb = NULL;
5237 int i;
5238 gimple_stmt_iterator gsi;
5239 edge e;
5240 tree t;
5241 gimple stmt;
5243 last_bb = cont_bb;
5244 for (i = fd->collapse - 1; i >= 0; i--)
5246 tree vtype = TREE_TYPE (fd->loops[i].v);
5248 bb = create_empty_bb (last_bb);
5249 add_bb_to_loop (bb, last_bb->loop_father);
5250 gsi = gsi_start_bb (bb);
5252 if (i < fd->collapse - 1)
5254 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5255 e->probability = REG_BR_PROB_BASE / 8;
5257 t = fd->loops[i + 1].n1;
5258 t = force_gimple_operand_gsi (&gsi, t,
5259 DECL_P (fd->loops[i + 1].v)
5260 && TREE_ADDRESSABLE (fd->loops[i
5261 + 1].v),
5262 NULL_TREE, false,
5263 GSI_CONTINUE_LINKING);
5264 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5265 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5267 else
5268 collapse_bb = bb;
5270 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5272 if (POINTER_TYPE_P (vtype))
5273 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5274 else
5275 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5276 t = force_gimple_operand_gsi (&gsi, t,
5277 DECL_P (fd->loops[i].v)
5278 && TREE_ADDRESSABLE (fd->loops[i].v),
5279 NULL_TREE, false, GSI_CONTINUE_LINKING);
5280 stmt = gimple_build_assign (fd->loops[i].v, t);
5281 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5283 if (i > 0)
5285 t = fd->loops[i].n2;
5286 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5287 false, GSI_CONTINUE_LINKING);
5288 tree v = fd->loops[i].v;
5289 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5290 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5291 false, GSI_CONTINUE_LINKING);
5292 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5293 stmt = gimple_build_cond_empty (t);
5294 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5295 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5296 e->probability = REG_BR_PROB_BASE * 7 / 8;
5298 else
5299 make_edge (bb, body_bb, EDGE_FALLTHRU);
5300 last_bb = bb;
5303 return collapse_bb;
5307 /* A subroutine of expand_omp_for. Generate code for a parallel
5308 loop with any schedule. Given parameters:
5310 for (V = N1; V cond N2; V += STEP) BODY;
5312 where COND is "<" or ">", we generate pseudocode
5314 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5315 if (more) goto L0; else goto L3;
5316 L0:
5317 V = istart0;
5318 iend = iend0;
5319 L1:
5320 BODY;
5321 V += STEP;
5322 if (V cond iend) goto L1; else goto L2;
5323 L2:
5324 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5325 L3:
5327 If this is a combined omp parallel loop, instead of the call to
5328 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5329 If this is a gimple_omp_for_combined_p loop, then instead of assigning
5330 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5331 inner GIMPLE_OMP_FOR and V += STEP; and
5332 if (V cond iend) goto L1; else goto L2; are removed.
5334 For collapsed loops, given parameters:
5335 collapse(3)
5336 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5337 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5338 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5339 BODY;
5341 we generate pseudocode
5343 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5344 if (cond3 is <)
5345 adj = STEP3 - 1;
5346 else
5347 adj = STEP3 + 1;
5348 count3 = (adj + N32 - N31) / STEP3;
5349 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5350 if (cond2 is <)
5351 adj = STEP2 - 1;
5352 else
5353 adj = STEP2 + 1;
5354 count2 = (adj + N22 - N21) / STEP2;
5355 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5356 if (cond1 is <)
5357 adj = STEP1 - 1;
5358 else
5359 adj = STEP1 + 1;
5360 count1 = (adj + N12 - N11) / STEP1;
5361 count = count1 * count2 * count3;
5362 goto Z1;
5363 Z0:
5364 count = 0;
5365 Z1:
5366 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5367 if (more) goto L0; else goto L3;
5368 L0:
5369 V = istart0;
5370 T = V;
5371 V3 = N31 + (T % count3) * STEP3;
5372 T = T / count3;
5373 V2 = N21 + (T % count2) * STEP2;
5374 T = T / count2;
5375 V1 = N11 + T * STEP1;
5376 iend = iend0;
5377 L1:
5378 BODY;
5379 V += 1;
5380 if (V < iend) goto L10; else goto L2;
5381 L10:
5382 V3 += STEP3;
5383 if (V3 cond3 N32) goto L1; else goto L11;
5384 L11:
5385 V3 = N31;
5386 V2 += STEP2;
5387 if (V2 cond2 N22) goto L1; else goto L12;
5388 L12:
5389 V2 = N21;
5390 V1 += STEP1;
5391 goto L1;
5392 L2:
5393 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5394 L3:
5398 static void
5399 expand_omp_for_generic (struct omp_region *region,
5400 struct omp_for_data *fd,
5401 enum built_in_function start_fn,
5402 enum built_in_function next_fn,
5403 gimple inner_stmt)
5405 tree type, istart0, iend0, iend;
5406 tree t, vmain, vback, bias = NULL_TREE;
5407 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5408 basic_block l2_bb = NULL, l3_bb = NULL;
5409 gimple_stmt_iterator gsi;
5410 gimple stmt;
5411 bool in_combined_parallel = is_combined_parallel (region);
5412 bool broken_loop = region->cont == NULL;
5413 edge e, ne;
5414 tree *counts = NULL;
5415 int i;
5417 gcc_assert (!broken_loop || !in_combined_parallel);
5418 gcc_assert (fd->iter_type == long_integer_type_node
5419 || !in_combined_parallel);
5421 type = TREE_TYPE (fd->loop.v);
5422 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5423 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5424 TREE_ADDRESSABLE (istart0) = 1;
5425 TREE_ADDRESSABLE (iend0) = 1;
5427 /* See if we need to bias by LLONG_MIN. */
5428 if (fd->iter_type == long_long_unsigned_type_node
5429 && TREE_CODE (type) == INTEGER_TYPE
5430 && !TYPE_UNSIGNED (type))
5432 tree n1, n2;
5434 if (fd->loop.cond_code == LT_EXPR)
5436 n1 = fd->loop.n1;
5437 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5439 else
5441 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5442 n2 = fd->loop.n1;
5444 if (TREE_CODE (n1) != INTEGER_CST
5445 || TREE_CODE (n2) != INTEGER_CST
5446 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5447 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5450 entry_bb = region->entry;
5451 cont_bb = region->cont;
5452 collapse_bb = NULL;
5453 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5454 gcc_assert (broken_loop
5455 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5456 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5457 l1_bb = single_succ (l0_bb);
5458 if (!broken_loop)
5460 l2_bb = create_empty_bb (cont_bb);
5461 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5462 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5464 else
5465 l2_bb = NULL;
5466 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5467 exit_bb = region->exit;
5469 gsi = gsi_last_bb (entry_bb);
5471 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5472 if (fd->collapse > 1)
5474 int first_zero_iter = -1;
5475 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5477 counts = XALLOCAVEC (tree, fd->collapse);
5478 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5479 zero_iter_bb, first_zero_iter,
5480 l2_dom_bb);
5482 if (zero_iter_bb)
5484 /* Some counts[i] vars might be uninitialized if
5485 some loop has zero iterations. But the body shouldn't
5486 be executed in that case, so just avoid uninit warnings. */
5487 for (i = first_zero_iter; i < fd->collapse; i++)
5488 if (SSA_VAR_P (counts[i]))
5489 TREE_NO_WARNING (counts[i]) = 1;
5490 gsi_prev (&gsi);
5491 e = split_block (entry_bb, gsi_stmt (gsi));
5492 entry_bb = e->dest;
5493 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5494 gsi = gsi_last_bb (entry_bb);
5495 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5496 get_immediate_dominator (CDI_DOMINATORS,
5497 zero_iter_bb));
5500 if (in_combined_parallel)
5502 /* In a combined parallel loop, emit a call to
5503 GOMP_loop_foo_next. */
5504 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5505 build_fold_addr_expr (istart0),
5506 build_fold_addr_expr (iend0));
5508 else
5510 tree t0, t1, t2, t3, t4;
5511 /* If this is not a combined parallel loop, emit a call to
5512 GOMP_loop_foo_start in ENTRY_BB. */
5513 t4 = build_fold_addr_expr (iend0);
5514 t3 = build_fold_addr_expr (istart0);
5515 t2 = fold_convert (fd->iter_type, fd->loop.step);
5516 t1 = fd->loop.n2;
5517 t0 = fd->loop.n1;
5518 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5520 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5521 OMP_CLAUSE__LOOPTEMP_);
5522 gcc_assert (innerc);
5523 t0 = OMP_CLAUSE_DECL (innerc);
5524 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5525 OMP_CLAUSE__LOOPTEMP_);
5526 gcc_assert (innerc);
5527 t1 = OMP_CLAUSE_DECL (innerc);
5529 if (POINTER_TYPE_P (TREE_TYPE (t0))
5530 && TYPE_PRECISION (TREE_TYPE (t0))
5531 != TYPE_PRECISION (fd->iter_type))
5533 /* Avoid casting pointers to an integer of a different size. */
5534 tree itype = signed_type_for (type);
5535 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5536 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5538 else
5540 t1 = fold_convert (fd->iter_type, t1);
5541 t0 = fold_convert (fd->iter_type, t0);
5543 if (bias)
5545 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5546 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5548 if (fd->iter_type == long_integer_type_node)
5550 if (fd->chunk_size)
5552 t = fold_convert (fd->iter_type, fd->chunk_size);
5553 t = build_call_expr (builtin_decl_explicit (start_fn),
5554 6, t0, t1, t2, t, t3, t4);
5556 else
5557 t = build_call_expr (builtin_decl_explicit (start_fn),
5558 5, t0, t1, t2, t3, t4);
5560 else
5562 tree t5;
5563 tree c_bool_type;
5564 tree bfn_decl;
5566 /* The GOMP_loop_ull_*start functions have an additional boolean
5567 argument, true for < loops and false for > loops.
5568 In Fortran, the C bool type can be different from
5569 boolean_type_node. */
5570 bfn_decl = builtin_decl_explicit (start_fn);
5571 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5572 t5 = build_int_cst (c_bool_type,
5573 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5574 if (fd->chunk_size)
5576 tree bfn_decl = builtin_decl_explicit (start_fn);
5577 t = fold_convert (fd->iter_type, fd->chunk_size);
5578 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5580 else
5581 t = build_call_expr (builtin_decl_explicit (start_fn),
5582 6, t5, t0, t1, t2, t3, t4);
5585 if (TREE_TYPE (t) != boolean_type_node)
5586 t = fold_build2 (NE_EXPR, boolean_type_node,
5587 t, build_int_cst (TREE_TYPE (t), 0));
5588 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5589 true, GSI_SAME_STMT);
5590 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5592 /* Remove the GIMPLE_OMP_FOR statement. */
5593 gsi_remove (&gsi, true);
5595 /* Iteration setup for sequential loop goes in L0_BB. */
5596 tree startvar = fd->loop.v;
5597 tree endvar = NULL_TREE;
5599 if (gimple_omp_for_combined_p (fd->for_stmt))
5601 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5602 && gimple_omp_for_kind (inner_stmt)
5603 == GF_OMP_FOR_KIND_SIMD);
5604 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5605 OMP_CLAUSE__LOOPTEMP_);
5606 gcc_assert (innerc);
5607 startvar = OMP_CLAUSE_DECL (innerc);
5608 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5609 OMP_CLAUSE__LOOPTEMP_);
5610 gcc_assert (innerc);
5611 endvar = OMP_CLAUSE_DECL (innerc);
5614 gsi = gsi_start_bb (l0_bb);
5615 t = istart0;
5616 if (bias)
5617 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5618 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5619 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5620 t = fold_convert (TREE_TYPE (startvar), t);
5621 t = force_gimple_operand_gsi (&gsi, t,
5622 DECL_P (startvar)
5623 && TREE_ADDRESSABLE (startvar),
5624 NULL_TREE, false, GSI_CONTINUE_LINKING);
5625 stmt = gimple_build_assign (startvar, t);
5626 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5628 t = iend0;
5629 if (bias)
5630 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5631 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5632 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5633 t = fold_convert (TREE_TYPE (startvar), t);
5634 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5635 false, GSI_CONTINUE_LINKING);
5636 if (endvar)
5638 stmt = gimple_build_assign (endvar, iend);
5639 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5640 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5641 stmt = gimple_build_assign (fd->loop.v, iend);
5642 else
5643 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, iend,
5644 NULL_TREE);
5645 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5647 if (fd->collapse > 1)
5648 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5650 if (!broken_loop)
5652 /* Code to control the increment and predicate for the sequential
5653 loop goes in the CONT_BB. */
5654 gsi = gsi_last_bb (cont_bb);
5655 stmt = gsi_stmt (gsi);
5656 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5657 vmain = gimple_omp_continue_control_use (stmt);
5658 vback = gimple_omp_continue_control_def (stmt);
5660 if (!gimple_omp_for_combined_p (fd->for_stmt))
5662 if (POINTER_TYPE_P (type))
5663 t = fold_build_pointer_plus (vmain, fd->loop.step);
5664 else
5665 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5666 t = force_gimple_operand_gsi (&gsi, t,
5667 DECL_P (vback)
5668 && TREE_ADDRESSABLE (vback),
5669 NULL_TREE, true, GSI_SAME_STMT);
5670 stmt = gimple_build_assign (vback, t);
5671 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5673 t = build2 (fd->loop.cond_code, boolean_type_node,
5674 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5675 iend);
5676 stmt = gimple_build_cond_empty (t);
5677 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5680 /* Remove GIMPLE_OMP_CONTINUE. */
5681 gsi_remove (&gsi, true);
5683 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5684 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5686 /* Emit code to get the next parallel iteration in L2_BB. */
5687 gsi = gsi_start_bb (l2_bb);
5689 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5690 build_fold_addr_expr (istart0),
5691 build_fold_addr_expr (iend0));
5692 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5693 false, GSI_CONTINUE_LINKING);
5694 if (TREE_TYPE (t) != boolean_type_node)
5695 t = fold_build2 (NE_EXPR, boolean_type_node,
5696 t, build_int_cst (TREE_TYPE (t), 0));
5697 stmt = gimple_build_cond_empty (t);
5698 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5701 /* Add the loop cleanup function. */
5702 gsi = gsi_last_bb (exit_bb);
5703 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5704 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5705 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5706 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5707 else
5708 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5709 stmt = gimple_build_call (t, 0);
5710 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5711 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5712 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5713 gsi_remove (&gsi, true);
5715 /* Connect the new blocks. */
5716 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5717 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5719 if (!broken_loop)
5721 gimple_seq phis;
5723 e = find_edge (cont_bb, l3_bb);
5724 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5726 phis = phi_nodes (l3_bb);
5727 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5729 gimple phi = gsi_stmt (gsi);
5730 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5731 PHI_ARG_DEF_FROM_EDGE (phi, e));
5733 remove_edge (e);
5735 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5736 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5737 e = find_edge (cont_bb, l1_bb);
5738 if (gimple_omp_for_combined_p (fd->for_stmt))
5740 remove_edge (e);
5741 e = NULL;
5743 else if (fd->collapse > 1)
5745 remove_edge (e);
5746 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5748 else
5749 e->flags = EDGE_TRUE_VALUE;
5750 if (e)
5752 e->probability = REG_BR_PROB_BASE * 7 / 8;
5753 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5755 else
5757 e = find_edge (cont_bb, l2_bb);
5758 e->flags = EDGE_FALLTHRU;
5760 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5762 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5763 recompute_dominator (CDI_DOMINATORS, l2_bb));
5764 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5765 recompute_dominator (CDI_DOMINATORS, l3_bb));
5766 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5767 recompute_dominator (CDI_DOMINATORS, l0_bb));
5768 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5769 recompute_dominator (CDI_DOMINATORS, l1_bb));
5771 struct loop *outer_loop = alloc_loop ();
5772 outer_loop->header = l0_bb;
5773 outer_loop->latch = l2_bb;
5774 add_loop (outer_loop, l0_bb->loop_father);
5776 if (!gimple_omp_for_combined_p (fd->for_stmt))
5778 struct loop *loop = alloc_loop ();
5779 loop->header = l1_bb;
5780 /* The loop may have multiple latches. */
5781 add_loop (loop, outer_loop);
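/* For illustration only: the control flow just wired up amounts to this
   plain C driver, written here against the libgomp entry points for
   schedule(dynamic) (other schedules substitute their own _start/_next
   pair); the "<" condition and "long" types are simplifying assumptions.  */

extern _Bool GOMP_loop_dynamic_start (long, long, long, long, long *, long *);
extern _Bool GOMP_loop_dynamic_next (long *, long *);
extern void GOMP_loop_end (void);

static void
example_generic_driver (long n1, long n2, long step, long chunk)
{
  long istart, iend;

  if (GOMP_loop_dynamic_start (n1, n2, step, chunk, &istart, &iend))
    do
      {
	long v;
	for (v = istart; v < iend; v += step)
	  /* BODY for iteration V */;
      }
    while (GOMP_loop_dynamic_next (&istart, &iend));
  GOMP_loop_end ();
}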
5787 /* A subroutine of expand_omp_for. Generate code for a parallel
5788 loop with static schedule and no specified chunk size. Given
5789 parameters:
5791 for (V = N1; V cond N2; V += STEP) BODY;
5793 where COND is "<" or ">", we generate pseudocode
5795 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5796 if (cond is <)
5797 adj = STEP - 1;
5798 else
5799 adj = STEP + 1;
5800 if ((__typeof (V)) -1 > 0 && cond is >)
5801 n = -(adj + N2 - N1) / -STEP;
5802 else
5803 n = (adj + N2 - N1) / STEP;
5804 q = n / nthreads;
5805 tt = n % nthreads;
5806 if (threadid < tt) goto L3; else goto L4;
5807 L3:
5808 tt = 0;
5809 q = q + 1;
5810 L4:
5811 s0 = q * threadid + tt;
5812 e0 = s0 + q;
5813 V = s0 * STEP + N1;
5814 if (s0 >= e0) goto L2; else goto L0;
5815 L0:
5816 e = e0 * STEP + N1;
5817 L1:
5818 BODY;
5819 V += STEP;
5820 if (V cond e) goto L1;
5821 L2:
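/* For illustration only: the partitioning arithmetic above as a plain C
   helper (hypothetical names).  Given the precomputed iteration count N,
   it returns thread TID's half-open range [*ps0, *pe0) of logical
   iterations; the generated code then maps s0/e0 back through
   V = s0 * STEP + N1.  E.g. n = 10, nthreads = 4 yields the ranges
   [0,3) [3,6) [6,8) [8,10).  */

static void
example_static_nochunk_range (unsigned long n, unsigned long nthreads,
			      unsigned long tid,
			      unsigned long *ps0, unsigned long *pe0)
{
  unsigned long q = n / nthreads;	/* base share per thread */
  unsigned long tt = n % nthreads;	/* first TT threads get one extra */

  if (tid < tt)				/* the L3 path above */
    {
      tt = 0;
      q++;
    }
  *ps0 = q * tid + tt;			/* the L4 path above */
  *pe0 = *ps0 + q;
}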
5824 static void
5825 expand_omp_for_static_nochunk (struct omp_region *region,
5826 struct omp_for_data *fd,
5827 gimple inner_stmt)
5829 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
5830 tree type, itype, vmain, vback;
5831 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
5832 basic_block body_bb, cont_bb, collapse_bb = NULL;
5833 basic_block fin_bb;
5834 gimple_stmt_iterator gsi;
5835 gimple stmt;
5836 edge ep;
5837 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
5838 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
5839 bool broken_loop = region->cont == NULL;
5840 tree *counts = NULL;
5841 tree n1, n2, step;
5843 itype = type = TREE_TYPE (fd->loop.v);
5844 if (POINTER_TYPE_P (type))
5845 itype = signed_type_for (type);
5847 entry_bb = region->entry;
5848 cont_bb = region->cont;
5849 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5850 fin_bb = BRANCH_EDGE (entry_bb)->dest;
5851 gcc_assert (broken_loop
5852 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
5853 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5854 body_bb = single_succ (seq_start_bb);
5855 if (!broken_loop)
5857 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5858 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5860 exit_bb = region->exit;
5862 /* Iteration space partitioning goes in ENTRY_BB. */
5863 gsi = gsi_last_bb (entry_bb);
5864 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5866 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
5868 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
5869 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
5872 if (fd->collapse > 1)
5874 int first_zero_iter = -1;
5875 basic_block l2_dom_bb = NULL;
5877 counts = XALLOCAVEC (tree, fd->collapse);
5878 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5879 fin_bb, first_zero_iter,
5880 l2_dom_bb);
5881 t = NULL_TREE;
5883 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
5884 t = integer_one_node;
5885 else
5886 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5887 fold_convert (type, fd->loop.n1),
5888 fold_convert (type, fd->loop.n2));
5889 if (fd->collapse == 1
5890 && TYPE_UNSIGNED (type)
5891 && (t == NULL_TREE || !integer_onep (t)))
5893 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5894 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
5895 true, GSI_SAME_STMT);
5896 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5897 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
5898 true, GSI_SAME_STMT);
5899 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5900 NULL_TREE, NULL_TREE);
5901 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5902 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5903 expand_omp_regimplify_p, NULL, NULL)
5904 || walk_tree (gimple_cond_rhs_ptr (stmt),
5905 expand_omp_regimplify_p, NULL, NULL))
5907 gsi = gsi_for_stmt (stmt);
5908 gimple_regimplify_operands (stmt, &gsi);
5910 ep = split_block (entry_bb, stmt);
5911 ep->flags = EDGE_TRUE_VALUE;
5912 entry_bb = ep->dest;
5913 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5914 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
5915 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
5916 if (gimple_in_ssa_p (cfun))
5918 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5919 for (gsi = gsi_start_phis (fin_bb);
5920 !gsi_end_p (gsi); gsi_next (&gsi))
5922 gimple phi = gsi_stmt (gsi);
5923 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5924 ep, UNKNOWN_LOCATION);
5927 gsi = gsi_last_bb (entry_bb);
5930 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
5931 t = fold_convert (itype, t);
5932 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5933 true, GSI_SAME_STMT);
5935 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
5936 t = fold_convert (itype, t);
5937 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5938 true, GSI_SAME_STMT);
5940 n1 = fd->loop.n1;
5941 n2 = fd->loop.n2;
5942 step = fd->loop.step;
5943 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5945 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5946 OMP_CLAUSE__LOOPTEMP_);
5947 gcc_assert (innerc);
5948 n1 = OMP_CLAUSE_DECL (innerc);
5949 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5950 OMP_CLAUSE__LOOPTEMP_);
5951 gcc_assert (innerc);
5952 n2 = OMP_CLAUSE_DECL (innerc);
5954 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
5955 true, NULL_TREE, true, GSI_SAME_STMT);
5956 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
5957 true, NULL_TREE, true, GSI_SAME_STMT);
5958 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
5959 true, NULL_TREE, true, GSI_SAME_STMT);
5961 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
5962 t = fold_build2 (PLUS_EXPR, itype, step, t);
5963 t = fold_build2 (PLUS_EXPR, itype, t, n2);
5964 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
5965 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5966 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5967 fold_build1 (NEGATE_EXPR, itype, t),
5968 fold_build1 (NEGATE_EXPR, itype, step));
5969 else
5970 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
5971 t = fold_convert (itype, t);
5972 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
5974 q = create_tmp_reg (itype, "q");
5975 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
5976 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5977 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
5979 tt = create_tmp_reg (itype, "tt");
5980 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
5981 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5982 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
5984 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
5985 stmt = gimple_build_cond_empty (t);
5986 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5988 second_bb = split_block (entry_bb, stmt)->dest;
5989 gsi = gsi_last_bb (second_bb);
5990 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5992 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
5993 GSI_SAME_STMT);
5994 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
5995 build_int_cst (itype, 1));
5996 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5998 third_bb = split_block (second_bb, stmt)->dest;
5999 gsi = gsi_last_bb (third_bb);
6000 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6002 t = build2 (MULT_EXPR, itype, q, threadid);
6003 t = build2 (PLUS_EXPR, itype, t, tt);
6004 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6006 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6007 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6009 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6010 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6012 /* Remove the GIMPLE_OMP_FOR statement. */
6013 gsi_remove (&gsi, true);
6015 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6016 gsi = gsi_start_bb (seq_start_bb);
6018 tree startvar = fd->loop.v;
6019 tree endvar = NULL_TREE;
6021 if (gimple_omp_for_combined_p (fd->for_stmt))
6023 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6024 ? gimple_omp_parallel_clauses (inner_stmt)
6025 : gimple_omp_for_clauses (inner_stmt);
6026 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6027 gcc_assert (innerc);
6028 startvar = OMP_CLAUSE_DECL (innerc);
6029 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6030 OMP_CLAUSE__LOOPTEMP_);
6031 gcc_assert (innerc);
6032 endvar = OMP_CLAUSE_DECL (innerc);
6034 t = fold_convert (itype, s0);
6035 t = fold_build2 (MULT_EXPR, itype, t, step);
6036 if (POINTER_TYPE_P (type))
6037 t = fold_build_pointer_plus (n1, t);
6038 else
6039 t = fold_build2 (PLUS_EXPR, type, t, n1);
6040 t = fold_convert (TREE_TYPE (startvar), t);
6041 t = force_gimple_operand_gsi (&gsi, t,
6042 DECL_P (startvar)
6043 && TREE_ADDRESSABLE (startvar),
6044 NULL_TREE, false, GSI_CONTINUE_LINKING);
6045 stmt = gimple_build_assign (startvar, t);
6046 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6048 t = fold_convert (itype, e0);
6049 t = fold_build2 (MULT_EXPR, itype, t, step);
6050 if (POINTER_TYPE_P (type))
6051 t = fold_build_pointer_plus (n1, t);
6052 else
6053 t = fold_build2 (PLUS_EXPR, type, t, n1);
6054 t = fold_convert (TREE_TYPE (startvar), t);
6055 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6056 false, GSI_CONTINUE_LINKING);
6057 if (endvar)
6059 stmt = gimple_build_assign (endvar, e);
6060 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6061 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6062 stmt = gimple_build_assign (fd->loop.v, e);
6063 else
6064 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6065 NULL_TREE);
6066 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6068 if (fd->collapse > 1)
6069 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6071 if (!broken_loop)
6073 /* The code controlling the sequential loop replaces the
6074 GIMPLE_OMP_CONTINUE. */
6075 gsi = gsi_last_bb (cont_bb);
6076 stmt = gsi_stmt (gsi);
6077 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6078 vmain = gimple_omp_continue_control_use (stmt);
6079 vback = gimple_omp_continue_control_def (stmt);
6081 if (!gimple_omp_for_combined_p (fd->for_stmt))
6083 if (POINTER_TYPE_P (type))
6084 t = fold_build_pointer_plus (vmain, step);
6085 else
6086 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6087 t = force_gimple_operand_gsi (&gsi, t,
6088 DECL_P (vback)
6089 && TREE_ADDRESSABLE (vback),
6090 NULL_TREE, true, GSI_SAME_STMT);
6091 stmt = gimple_build_assign (vback, t);
6092 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6094 t = build2 (fd->loop.cond_code, boolean_type_node,
6095 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6096 ? t : vback, e);
6097 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6100 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6101 gsi_remove (&gsi, true);
6103 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6104 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6107 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6108 gsi = gsi_last_bb (exit_bb);
6109 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6111 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6112 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6114 gsi_remove (&gsi, true);
6116 /* Connect all the blocks. */
6117 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6118 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6119 ep = find_edge (entry_bb, second_bb);
6120 ep->flags = EDGE_TRUE_VALUE;
6121 ep->probability = REG_BR_PROB_BASE / 4;
6122 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6123 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6125 if (!broken_loop)
6127 ep = find_edge (cont_bb, body_bb);
6128 if (gimple_omp_for_combined_p (fd->for_stmt))
6130 remove_edge (ep);
6131 ep = NULL;
6133 else if (fd->collapse > 1)
6135 remove_edge (ep);
6136 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6138 else
6139 ep->flags = EDGE_TRUE_VALUE;
6140 find_edge (cont_bb, fin_bb)->flags
6141 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6144 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6145 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6146 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6148 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6149 recompute_dominator (CDI_DOMINATORS, body_bb));
6150 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6151 recompute_dominator (CDI_DOMINATORS, fin_bb));
6153 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6155 struct loop *loop = alloc_loop ();
6156 loop->header = body_bb;
6157 if (collapse_bb == NULL)
6158 loop->latch = cont_bb;
6159 add_loop (loop, body_bb->loop_father);
6164 /* A subroutine of expand_omp_for. Generate code for a parallel
6165 loop with static schedule and a specified chunk size. Given
6166 parameters:
6168 for (V = N1; V cond N2; V += STEP) BODY;
6170 where COND is "<" or ">", we generate pseudocode
6172 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6173 if (cond is <)
6174 adj = STEP - 1;
6175 else
6176 adj = STEP + 1;
6177 if ((__typeof (V)) -1 > 0 && cond is >)
6178 n = -(adj + N2 - N1) / -STEP;
6179 else
6180 n = (adj + N2 - N1) / STEP;
6181 trip = 0;
6182 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6183 here so that V is defined
6184 if the loop is not entered
6185 L0:
6186 s0 = (trip * nthreads + threadid) * CHUNK;
6187 e0 = min(s0 + CHUNK, n);
6188 if (s0 < n) goto L1; else goto L4;
6189 L1:
6190 V = s0 * STEP + N1;
6191 e = e0 * STEP + N1;
6192 L2:
6193 BODY;
6194 V += STEP;
6195 if (V cond e) goto L2; else goto L3;
6196 L3:
6197 trip += 1;
6198 goto L0;
6199 L4:
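/* For illustration only: the round-robin chunk walk above as plain C
   (hypothetical helper).  Thread TID executes chunks TID, TID + NTHREADS,
   TID + 2*NTHREADS, ... of CHUNK logical iterations each; the function
   returns how many iterations TID ends up running.  E.g. n = 10,
   nthreads = 2, chunk = 3 gives thread 0 the ranges [0,3) and [6,9),
   and thread 1 the ranges [3,6) and [9,10).  */

static unsigned long
example_static_chunk_share (unsigned long n, unsigned long nthreads,
			    unsigned long tid, unsigned long chunk)
{
  unsigned long trip, s0, e0, total = 0;

  for (trip = 0; ; trip++)
    {
      s0 = (trip * nthreads + tid) * chunk;   /* start of the next chunk */
      if (s0 >= n)
	break;				      /* no chunk left: L4 above */
      e0 = s0 + chunk < n ? s0 + chunk : n;   /* e0 = min (s0 + CHUNK, n) */
      total += e0 - s0;			      /* BODY runs for [s0, e0) */
    }
  return total;
}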
6202 static void
6203 expand_omp_for_static_chunk (struct omp_region *region,
6204 struct omp_for_data *fd, gimple inner_stmt)
6206 tree n, s0, e0, e, t;
6207 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6208 tree type, itype, vmain, vback, vextra;
6209 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6210 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6211 gimple_stmt_iterator gsi;
6212 gimple stmt;
6213 edge se;
6214 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6215 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6216 bool broken_loop = region->cont == NULL;
6217 tree *counts = NULL;
6218 tree n1, n2, step;
6220 itype = type = TREE_TYPE (fd->loop.v);
6221 if (POINTER_TYPE_P (type))
6222 itype = signed_type_for (type);
6224 entry_bb = region->entry;
6225 se = split_block (entry_bb, last_stmt (entry_bb));
6226 entry_bb = se->src;
6227 iter_part_bb = se->dest;
6228 cont_bb = region->cont;
6229 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6230 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6231 gcc_assert (broken_loop
6232 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6233 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6234 body_bb = single_succ (seq_start_bb);
6235 if (!broken_loop)
6237 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6238 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6239 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6241 exit_bb = region->exit;
6243 /* Trip and adjustment setup goes in ENTRY_BB. */
6244 gsi = gsi_last_bb (entry_bb);
6245 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6247 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6249 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6250 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6253 if (fd->collapse > 1)
6255 int first_zero_iter = -1;
6256 basic_block l2_dom_bb = NULL;
6258 counts = XALLOCAVEC (tree, fd->collapse);
6259 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6260 fin_bb, first_zero_iter,
6261 l2_dom_bb);
6262 t = NULL_TREE;
6264 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6265 t = integer_one_node;
6266 else
6267 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6268 fold_convert (type, fd->loop.n1),
6269 fold_convert (type, fd->loop.n2));
6270 if (fd->collapse == 1
6271 && TYPE_UNSIGNED (type)
6272 && (t == NULL_TREE || !integer_onep (t)))
6274 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6275 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6276 true, GSI_SAME_STMT);
6277 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6278 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6279 true, GSI_SAME_STMT);
6280 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6281 NULL_TREE, NULL_TREE);
6282 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6283 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6284 expand_omp_regimplify_p, NULL, NULL)
6285 || walk_tree (gimple_cond_rhs_ptr (stmt),
6286 expand_omp_regimplify_p, NULL, NULL))
6288 gsi = gsi_for_stmt (stmt);
6289 gimple_regimplify_operands (stmt, &gsi);
6291 se = split_block (entry_bb, stmt);
6292 se->flags = EDGE_TRUE_VALUE;
6293 entry_bb = se->dest;
6294 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6295 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6296 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6297 if (gimple_in_ssa_p (cfun))
6299 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6300 for (gsi = gsi_start_phis (fin_bb);
6301 !gsi_end_p (gsi); gsi_next (&gsi))
6303 gimple phi = gsi_stmt (gsi);
6304 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6305 se, UNKNOWN_LOCATION);
6308 gsi = gsi_last_bb (entry_bb);
6311 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6312 t = fold_convert (itype, t);
6313 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6314 true, GSI_SAME_STMT);
6316 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6317 t = fold_convert (itype, t);
6318 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6319 true, GSI_SAME_STMT);
6321 n1 = fd->loop.n1;
6322 n2 = fd->loop.n2;
6323 step = fd->loop.step;
6324 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6326 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6327 OMP_CLAUSE__LOOPTEMP_);
6328 gcc_assert (innerc);
6329 n1 = OMP_CLAUSE_DECL (innerc);
6330 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6331 OMP_CLAUSE__LOOPTEMP_);
6332 gcc_assert (innerc);
6333 n2 = OMP_CLAUSE_DECL (innerc);
6335 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6336 true, NULL_TREE, true, GSI_SAME_STMT);
6337 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6338 true, NULL_TREE, true, GSI_SAME_STMT);
6339 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6340 true, NULL_TREE, true, GSI_SAME_STMT);
6341 fd->chunk_size
6342 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
6343 true, NULL_TREE, true, GSI_SAME_STMT);
6345 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6346 t = fold_build2 (PLUS_EXPR, itype, step, t);
6347 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6348 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6349 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6350 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6351 fold_build1 (NEGATE_EXPR, itype, t),
6352 fold_build1 (NEGATE_EXPR, itype, step));
6353 else
6354 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6355 t = fold_convert (itype, t);
6356 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6357 true, GSI_SAME_STMT);
6359 trip_var = create_tmp_reg (itype, ".trip");
6360 if (gimple_in_ssa_p (cfun))
6362 trip_init = make_ssa_name (trip_var, NULL);
6363 trip_main = make_ssa_name (trip_var, NULL);
6364 trip_back = make_ssa_name (trip_var, NULL);
6366 else
6368 trip_init = trip_var;
6369 trip_main = trip_var;
6370 trip_back = trip_var;
6373 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
6374 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6376 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6377 t = fold_build2 (MULT_EXPR, itype, t, step);
6378 if (POINTER_TYPE_P (type))
6379 t = fold_build_pointer_plus (n1, t);
6380 else
6381 t = fold_build2 (PLUS_EXPR, type, t, n1);
6382 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6383 true, GSI_SAME_STMT);
6385 /* Remove the GIMPLE_OMP_FOR. */
6386 gsi_remove (&gsi, true);
6388 /* Iteration space partitioning goes in ITER_PART_BB. */
6389 gsi = gsi_last_bb (iter_part_bb);
6391 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6392 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6393 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6394 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6395 false, GSI_CONTINUE_LINKING);
6397 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6398 t = fold_build2 (MIN_EXPR, itype, t, n);
6399 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6400 false, GSI_CONTINUE_LINKING);
6402 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6403 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6405 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6406 gsi = gsi_start_bb (seq_start_bb);
6408 tree startvar = fd->loop.v;
6409 tree endvar = NULL_TREE;
6411 if (gimple_omp_for_combined_p (fd->for_stmt))
6413 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6414 ? gimple_omp_parallel_clauses (inner_stmt)
6415 : gimple_omp_for_clauses (inner_stmt);
6416 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6417 gcc_assert (innerc);
6418 startvar = OMP_CLAUSE_DECL (innerc);
6419 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6420 OMP_CLAUSE__LOOPTEMP_);
6421 gcc_assert (innerc);
6422 endvar = OMP_CLAUSE_DECL (innerc);
6425 t = fold_convert (itype, s0);
6426 t = fold_build2 (MULT_EXPR, itype, t, step);
6427 if (POINTER_TYPE_P (type))
6428 t = fold_build_pointer_plus (n1, t);
6429 else
6430 t = fold_build2 (PLUS_EXPR, type, t, n1);
6431 t = fold_convert (TREE_TYPE (startvar), t);
6432 t = force_gimple_operand_gsi (&gsi, t,
6433 DECL_P (startvar)
6434 && TREE_ADDRESSABLE (startvar),
6435 NULL_TREE, false, GSI_CONTINUE_LINKING);
6436 stmt = gimple_build_assign (startvar, t);
6437 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6439 t = fold_convert (itype, e0);
6440 t = fold_build2 (MULT_EXPR, itype, t, step);
6441 if (POINTER_TYPE_P (type))
6442 t = fold_build_pointer_plus (n1, t);
6443 else
6444 t = fold_build2 (PLUS_EXPR, type, t, n1);
6445 t = fold_convert (TREE_TYPE (startvar), t);
6446 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6447 false, GSI_CONTINUE_LINKING);
6448 if (endvar)
6450 stmt = gimple_build_assign (endvar, e);
6451 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6452 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6453 stmt = gimple_build_assign (fd->loop.v, e);
6454 else
6455 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6456 NULL_TREE);
6457 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6459 if (fd->collapse > 1)
6460 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6462 if (!broken_loop)
6464 /* The code controlling the sequential loop goes in CONT_BB,
6465 replacing the GIMPLE_OMP_CONTINUE. */
6466 gsi = gsi_last_bb (cont_bb);
6467 stmt = gsi_stmt (gsi);
6468 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6469 vmain = gimple_omp_continue_control_use (stmt);
6470 vback = gimple_omp_continue_control_def (stmt);
6472 if (!gimple_omp_for_combined_p (fd->for_stmt))
6474 if (POINTER_TYPE_P (type))
6475 t = fold_build_pointer_plus (vmain, step);
6476 else
6477 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6478 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
6479 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6480 true, GSI_SAME_STMT);
6481 stmt = gimple_build_assign (vback, t);
6482 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6484 t = build2 (fd->loop.cond_code, boolean_type_node,
6485 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6486 ? t : vback, e);
6487 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6490 /* Remove GIMPLE_OMP_CONTINUE. */
6491 gsi_remove (&gsi, true);
6493 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6494 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6496 /* Trip update code goes into TRIP_UPDATE_BB. */
6497 gsi = gsi_start_bb (trip_update_bb);
6499 t = build_int_cst (itype, 1);
6500 t = build2 (PLUS_EXPR, itype, trip_main, t);
6501 stmt = gimple_build_assign (trip_back, t);
6502 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6505 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6506 gsi = gsi_last_bb (exit_bb);
6507 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6509 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6510 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6512 gsi_remove (&gsi, true);
6514 /* Connect the new blocks. */
6515 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6516 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6518 if (!broken_loop)
6520 se = find_edge (cont_bb, body_bb);
6521 if (gimple_omp_for_combined_p (fd->for_stmt))
6523 remove_edge (se);
6524 se = NULL;
6526 else if (fd->collapse > 1)
6528 remove_edge (se);
6529 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6531 else
6532 se->flags = EDGE_TRUE_VALUE;
6533 find_edge (cont_bb, trip_update_bb)->flags
6534 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6536 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6539 if (gimple_in_ssa_p (cfun))
6541 gimple_stmt_iterator psi;
6542 gimple phi;
6543 edge re, ene;
6544 edge_var_map_vector *head;
6545 edge_var_map *vm;
6546 size_t i;
6548 gcc_assert (fd->collapse == 1 && !broken_loop);
6550 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6551 remove arguments of the phi nodes in fin_bb. We need to create
6552 appropriate phi nodes in iter_part_bb instead. */
6553 se = single_pred_edge (fin_bb);
6554 re = single_succ_edge (trip_update_bb);
6555 head = redirect_edge_var_map_vector (re);
6556 ene = single_succ_edge (entry_bb);
6558 psi = gsi_start_phis (fin_bb);
6559 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6560 gsi_next (&psi), ++i)
6562 gimple nphi;
6563 source_location locus;
6565 phi = gsi_stmt (psi);
6566 t = gimple_phi_result (phi);
6567 gcc_assert (t == redirect_edge_var_map_result (vm));
6568 nphi = create_phi_node (t, iter_part_bb);
6570 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6571 locus = gimple_phi_arg_location_from_edge (phi, se);
6573 /* A special case -- fd->loop.v is not yet computed in
6574 iter_part_bb; we need to use vextra instead. */
6575 if (t == fd->loop.v)
6576 t = vextra;
6577 add_phi_arg (nphi, t, ene, locus);
6578 locus = redirect_edge_var_map_location (vm);
6579 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6581 gcc_assert (!gsi_end_p (psi) && i == head->length ());
6582 redirect_edge_var_map_clear (re);
6583 while (1)
6585 psi = gsi_start_phis (fin_bb);
6586 if (gsi_end_p (psi))
6587 break;
6588 remove_phi_node (&psi, false);
6591 /* Make phi node for trip. */
6592 phi = create_phi_node (trip_main, iter_part_bb);
6593 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6594 UNKNOWN_LOCATION);
6595 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6596 UNKNOWN_LOCATION);
6599 if (!broken_loop)
6600 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6601 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6602 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6603 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6604 recompute_dominator (CDI_DOMINATORS, fin_bb));
6605 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6606 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6607 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6608 recompute_dominator (CDI_DOMINATORS, body_bb));
6610 if (!broken_loop)
6612 struct loop *trip_loop = alloc_loop ();
6613 trip_loop->header = iter_part_bb;
6614 trip_loop->latch = trip_update_bb;
6615 add_loop (trip_loop, iter_part_bb->loop_father);
6617 if (!gimple_omp_for_combined_p (fd->for_stmt))
6619 struct loop *loop = alloc_loop ();
6620 loop->header = body_bb;
6621 if (collapse_bb == NULL)
6622 loop->latch = cont_bb;
6623 add_loop (loop, trip_loop);
6629 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
6630 loop. Given parameters:
6632 for (V = N1; V cond N2; V += STEP) BODY;
6634 where COND is "<" or ">", we generate pseudocode
6636 V = N1;
6637 goto L1;
6638 L0:
6639 BODY;
6640 V += STEP;
6641 L1:
6642 if (V cond N2) goto L0; else goto L2;
6643 L2:
6645 For collapsed loops, given parameters:
6646 collapse(3)
6647 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6648 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6649 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6650 BODY;
6652 we generate pseudocode
6654 if (cond3 is <)
6655 adj = STEP3 - 1;
6656 else
6657 adj = STEP3 + 1;
6658 count3 = (adj + N32 - N31) / STEP3;
6659 if (cond2 is <)
6660 adj = STEP2 - 1;
6661 else
6662 adj = STEP2 + 1;
6663 count2 = (adj + N22 - N21) / STEP2;
6664 if (cond1 is <)
6665 adj = STEP1 - 1;
6666 else
6667 adj = STEP1 + 1;
6668 count1 = (adj + N12 - N11) / STEP1;
6669 count = count1 * count2 * count3;
6670 V = 0;
6671 V1 = N11;
6672 V2 = N21;
6673 V3 = N31;
6674 goto L1;
6675 L0:
6676 BODY;
6677 V += 1;
6678 V3 += STEP3;
6679 V2 += (V3 cond3 N32) ? 0 : STEP2;
6680 V3 = (V3 cond3 N32) ? V3 : N31;
6681 V1 += (V2 cond2 N22) ? 0 : STEP1;
6682 V2 = (V2 cond2 N22) ? V2 : N21;
6683 L1:
6684 if (V < count) goto L0; else goto L2;
6685 L2:
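/* For illustration only: the linearized bookkeeping above works like a
   mixed-radix odometer.  A plain C model for collapse(2), assuming "<"
   conditions, positive steps and in-range arithmetic (hypothetical
   names).  */

static void
example_collapse2 (long n11, long n12, long step1,
		   long n21, long n22, long step2)
{
  /* "adj = STEP - 1" above is the usual ceiling division for "<".  */
  long count1 = (step1 - 1 + n12 - n11) / step1;
  long count2 = (step2 - 1 + n22 - n21) / step2;
  long count = count1 * count2;
  long v, v1 = n11, v2 = n21;

  for (v = 0; v < count; v++)
    {
      /* BODY sees v1 and v2 here.  */
      v2 += step2;			/* the innermost counter ticks */
      v1 += (v2 < n22) ? 0 : step1;	/* carry into the outer level */
      v2 = (v2 < n22) ? v2 : n21;	/* and wrap the inner counter */
    }
}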
6689 static void
6690 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
6692 tree type, t;
6693 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
6694 gimple_stmt_iterator gsi;
6695 gimple stmt;
6696 bool broken_loop = region->cont == NULL;
6697 edge e, ne;
6698 tree *counts = NULL;
6699 int i;
6700 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6701 OMP_CLAUSE_SAFELEN);
6702 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6703 OMP_CLAUSE__SIMDUID_);
6704 tree n1, n2;
6706 type = TREE_TYPE (fd->loop.v);
6707 entry_bb = region->entry;
6708 cont_bb = region->cont;
6709 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6710 gcc_assert (broken_loop
6711 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6712 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6713 if (!broken_loop)
6715 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6716 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6717 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6718 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6720 else
6722 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6723 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6724 l2_bb = single_succ (l1_bb);
6726 exit_bb = region->exit;
6727 l2_dom_bb = NULL;
6729 gsi = gsi_last_bb (entry_bb);
6731 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6732 /* Not needed in SSA form right now. */
6733 gcc_assert (!gimple_in_ssa_p (cfun));
6734 if (fd->collapse > 1)
6736 int first_zero_iter = -1;
6737 basic_block zero_iter_bb = l2_bb;
6739 counts = XALLOCAVEC (tree, fd->collapse);
6740 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6741 zero_iter_bb, first_zero_iter,
6742 l2_dom_bb);
6744 if (l2_dom_bb == NULL)
6745 l2_dom_bb = l1_bb;
6747 n1 = fd->loop.n1;
6748 n2 = fd->loop.n2;
6749 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6751 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6752 OMP_CLAUSE__LOOPTEMP_);
6753 gcc_assert (innerc);
6754 n1 = OMP_CLAUSE_DECL (innerc);
6755 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6756 OMP_CLAUSE__LOOPTEMP_);
6757 gcc_assert (innerc);
6758 n2 = OMP_CLAUSE_DECL (innerc);
6759 expand_omp_build_assign (&gsi, fd->loop.v,
6760 fold_convert (type, n1));
6761 if (fd->collapse > 1)
6763 gsi_prev (&gsi);
6764 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
6765 gsi_next (&gsi);
6768 else
6770 expand_omp_build_assign (&gsi, fd->loop.v,
6771 fold_convert (type, fd->loop.n1));
6772 if (fd->collapse > 1)
6773 for (i = 0; i < fd->collapse; i++)
6775 tree itype = TREE_TYPE (fd->loops[i].v);
6776 if (POINTER_TYPE_P (itype))
6777 itype = signed_type_for (itype);
6778 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
6779 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6783 /* Remove the GIMPLE_OMP_FOR statement. */
6784 gsi_remove (&gsi, true);
6786 if (!broken_loop)
6788 /* Code to control the increment goes in the CONT_BB. */
6789 gsi = gsi_last_bb (cont_bb);
6790 stmt = gsi_stmt (gsi);
6791 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6793 if (POINTER_TYPE_P (type))
6794 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
6795 else
6796 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
6797 expand_omp_build_assign (&gsi, fd->loop.v, t);
6799 if (fd->collapse > 1)
6801 i = fd->collapse - 1;
6802 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
6804 t = fold_convert (sizetype, fd->loops[i].step);
6805 t = fold_build_pointer_plus (fd->loops[i].v, t);
6807 else
6809 t = fold_convert (TREE_TYPE (fd->loops[i].v),
6810 fd->loops[i].step);
6811 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
6812 fd->loops[i].v, t);
6814 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6816 for (i = fd->collapse - 1; i > 0; i--)
6818 tree itype = TREE_TYPE (fd->loops[i].v);
6819 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
6820 if (POINTER_TYPE_P (itype2))
6821 itype2 = signed_type_for (itype2);
6822 t = build3 (COND_EXPR, itype2,
6823 build2 (fd->loops[i].cond_code, boolean_type_node,
6824 fd->loops[i].v,
6825 fold_convert (itype, fd->loops[i].n2)),
6826 build_int_cst (itype2, 0),
6827 fold_convert (itype2, fd->loops[i - 1].step));
6828 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
6829 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
6830 else
6831 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
6832 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
6834 t = build3 (COND_EXPR, itype,
6835 build2 (fd->loops[i].cond_code, boolean_type_node,
6836 fd->loops[i].v,
6837 fold_convert (itype, fd->loops[i].n2)),
6838 fd->loops[i].v,
6839 fold_convert (itype, fd->loops[i].n1));
6840 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6844 /* Remove GIMPLE_OMP_CONTINUE. */
6845 gsi_remove (&gsi, true);
6848 /* Emit the condition in L1_BB. */
6849 gsi = gsi_start_bb (l1_bb);
6851 t = fold_convert (type, n2);
6852 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6853 false, GSI_CONTINUE_LINKING);
6854 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
6855 stmt = gimple_build_cond_empty (t);
6856 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6857 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
6858 NULL, NULL)
6859 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
6860 NULL, NULL))
6862 gsi = gsi_for_stmt (stmt);
6863 gimple_regimplify_operands (stmt, &gsi);
6866 /* Remove GIMPLE_OMP_RETURN. */
6867 gsi = gsi_last_bb (exit_bb);
6868 gsi_remove (&gsi, true);
6870 /* Connect the new blocks. */
6871 remove_edge (FALLTHRU_EDGE (entry_bb));
6873 if (!broken_loop)
6875 remove_edge (BRANCH_EDGE (entry_bb));
6876 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6878 e = BRANCH_EDGE (l1_bb);
6879 ne = FALLTHRU_EDGE (l1_bb);
6880 e->flags = EDGE_TRUE_VALUE;
6882 else
6884 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6886 ne = single_succ_edge (l1_bb);
6887 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6890 ne->flags = EDGE_FALSE_VALUE;
6891 e->probability = REG_BR_PROB_BASE * 7 / 8;
6892 ne->probability = REG_BR_PROB_BASE / 8;
6894 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6895 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6896 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6898 if (!broken_loop)
6900 struct loop *loop = alloc_loop ();
6901 loop->header = l1_bb;
6902 loop->latch = cont_bb;
6903 add_loop (loop, l1_bb->loop_father);
6904 if (safelen == NULL_TREE)
6905 loop->safelen = INT_MAX;
6906 else
6908 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
6909 if (TREE_CODE (safelen) != INTEGER_CST)
6910 loop->safelen = 0;
6911 else if (!tree_fits_uhwi_p (safelen)
6912 || tree_to_uhwi (safelen) > INT_MAX)
6913 loop->safelen = INT_MAX;
6914 else
6915 loop->safelen = tree_to_uhwi (safelen);
6916 if (loop->safelen == 1)
6917 loop->safelen = 0;
6919 if (simduid)
6921 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
6922 cfun->has_simduid_loops = true;
6924 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
6925 the loop. */
6926 if ((flag_tree_loop_vectorize
6927 || (!global_options_set.x_flag_tree_loop_vectorize
6928 && !global_options_set.x_flag_tree_vectorize))
6929 && flag_tree_loop_optimize
6930 && loop->safelen > 1)
6932 loop->force_vectorize = true;
6933 cfun->has_force_vectorize_loops = true;
6939 /* Expand the OpenMP loop defined by REGION. */
6941 static void
6942 expand_omp_for (struct omp_region *region, gimple inner_stmt)
6944 struct omp_for_data fd;
6945 struct omp_for_data_loop *loops;
6947 loops
6948 = (struct omp_for_data_loop *)
6949 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
6950 * sizeof (struct omp_for_data_loop));
6951 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
6952 region->sched_kind = fd.sched_kind;
6954 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
6955 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6956 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6957 if (region->cont)
6959 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
6960 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6961 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6963 else
6964 /* If there isn't a continue, then this is a degenerate case where
6965 the introduction of abnormal edges during lowering will prevent
6966 original loops from being detected. Fix that up. */
6967 loops_state_set (LOOPS_NEED_FIXUP);
6969 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
6970 expand_omp_simd (region, &fd);
6971 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
6972 && !fd.have_ordered)
6974 if (fd.chunk_size == NULL)
6975 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
6976 else
6977 expand_omp_for_static_chunk (region, &fd, inner_stmt);
6979 else
6981 int fn_index, start_ix, next_ix;
6983 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
6984 == GF_OMP_FOR_KIND_FOR);
6985 if (fd.chunk_size == NULL
6986 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
6987 fd.chunk_size = integer_zero_node;
6988 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
6989 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
6990 ? 3 : fd.sched_kind;
6991 fn_index += fd.have_ordered * 4;
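/* The GOMP_loop_*_start/next builtins for the static, dynamic, guided
   and runtime schedules (and their _ordered variants) are declared
   consecutively, so the offsets below select the matching pair; e.g.
   schedule(dynamic) gives fn_index 1 and thus
   BUILT_IN_GOMP_LOOP_DYNAMIC_START / _NEXT.  */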
6992 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
6993 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
6994 if (fd.iter_type == long_long_unsigned_type_node)
6996 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
6997 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
6998 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
6999 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7001 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7002 (enum built_in_function) next_ix, inner_stmt);
7005 if (gimple_in_ssa_p (cfun))
7006 update_ssa (TODO_update_ssa_only_virtuals);
7010 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7012 v = GOMP_sections_start (n);
7013 L0:
7014 switch (v)
7015 {
7016 case 0:
7017 goto L2;
7018 case 1:
7019 section 1;
7020 goto L1;
7021 case 2:
7022 ...
7023 case n:
7024 ...
7025 default:
7026 abort ();
7027 }
7028 L1:
7029 v = GOMP_sections_next ();
7030 goto L0;
7031 L2:
7032 reduction;
7034 If this is a combined parallel sections, replace the call to
7035 GOMP_sections_start with call to GOMP_sections_next. */
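/* For illustration only: for a two-section construct, the code generated
   here behaves like this plain C (GOMP_sections_start/next/end are the
   libgomp entry points involved; the nowait and cancel variants differ
   only in the final call).  */

extern unsigned GOMP_sections_start (unsigned);
extern unsigned GOMP_sections_next (void);
extern void GOMP_sections_end (void);

static void
example_sections_driver (void)
{
  unsigned v;

  for (v = GOMP_sections_start (2); ; v = GOMP_sections_next ())
    switch (v)
      {
      case 0:				/* L2: no section left for us */
	GOMP_sections_end ();
	return;
      case 1:
	/* body of section 1 */
	break;
      case 2:
	/* body of section 2 */
	break;
      default:
	__builtin_trap ();		/* the DEFAULT_BB built below */
      }
}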
7037 static void
7038 expand_omp_sections (struct omp_region *region)
7040 tree t, u, vin = NULL, vmain, vnext, l2;
7041 unsigned len;
7042 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7043 gimple_stmt_iterator si, switch_si;
7044 gimple sections_stmt, stmt, cont;
7045 edge_iterator ei;
7046 edge e;
7047 struct omp_region *inner;
7048 unsigned i, casei;
7049 bool exit_reachable = region->cont != NULL;
7051 gcc_assert (region->exit != NULL);
7052 entry_bb = region->entry;
7053 l0_bb = single_succ (entry_bb);
7054 l1_bb = region->cont;
7055 l2_bb = region->exit;
7056 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7057 l2 = gimple_block_label (l2_bb);
7058 else
7060 /* This can happen if there are reductions. */
7061 len = EDGE_COUNT (l0_bb->succs);
7062 gcc_assert (len > 0);
7063 e = EDGE_SUCC (l0_bb, len - 1);
7064 si = gsi_last_bb (e->dest);
7065 l2 = NULL_TREE;
7066 if (gsi_end_p (si)
7067 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7068 l2 = gimple_block_label (e->dest);
7069 else
7070 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7072 si = gsi_last_bb (e->dest);
7073 if (gsi_end_p (si)
7074 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7076 l2 = gimple_block_label (e->dest);
7077 break;
7081 if (exit_reachable)
7082 default_bb = create_empty_bb (l1_bb->prev_bb);
7083 else
7084 default_bb = create_empty_bb (l0_bb);
7086 /* We will build a switch() with enough cases for all the
7087 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work remains,
7088 and a default case to abort if something goes wrong. */
7089 len = EDGE_COUNT (l0_bb->succs);
7091 /* Use vec::quick_push on label_vec throughout, since we know the size
7092 in advance. */
7093 auto_vec<tree> label_vec (len);
7095 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7096 GIMPLE_OMP_SECTIONS statement. */
7097 si = gsi_last_bb (entry_bb);
7098 sections_stmt = gsi_stmt (si);
7099 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7100 vin = gimple_omp_sections_control (sections_stmt);
7101 if (!is_combined_parallel (region))
7103 /* If we are not inside a combined parallel+sections region,
7104 call GOMP_sections_start. */
7105 t = build_int_cst (unsigned_type_node, len - 1);
7106 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7107 stmt = gimple_build_call (u, 1, t);
7109 else
7111 /* Otherwise, call GOMP_sections_next. */
7112 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7113 stmt = gimple_build_call (u, 0);
7115 gimple_call_set_lhs (stmt, vin);
7116 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7117 gsi_remove (&si, true);
7119 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7120 L0_BB. */
7121 switch_si = gsi_last_bb (l0_bb);
7122 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7123 if (exit_reachable)
7125 cont = last_stmt (l1_bb);
7126 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7127 vmain = gimple_omp_continue_control_use (cont);
7128 vnext = gimple_omp_continue_control_def (cont);
7130 else
7132 vmain = vin;
7133 vnext = NULL_TREE;
7136 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7137 label_vec.quick_push (t);
7138 i = 1;
7140 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7141 for (inner = region->inner, casei = 1;
7142 inner;
7143 inner = inner->next, i++, casei++)
7145 basic_block s_entry_bb, s_exit_bb;
7147 /* Skip optional reduction region. */
7148 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7150 --i;
7151 --casei;
7152 continue;
7155 s_entry_bb = inner->entry;
7156 s_exit_bb = inner->exit;
7158 t = gimple_block_label (s_entry_bb);
7159 u = build_int_cst (unsigned_type_node, casei);
7160 u = build_case_label (u, NULL, t);
7161 label_vec.quick_push (u);
7163 si = gsi_last_bb (s_entry_bb);
7164 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7165 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7166 gsi_remove (&si, true);
7167 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7169 if (s_exit_bb == NULL)
7170 continue;
7172 si = gsi_last_bb (s_exit_bb);
7173 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7174 gsi_remove (&si, true);
7176 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7179 /* Error handling code goes in DEFAULT_BB. */
7180 t = gimple_block_label (default_bb);
7181 u = build_case_label (NULL, NULL, t);
7182 make_edge (l0_bb, default_bb, 0);
7183 add_bb_to_loop (default_bb, current_loops->tree_root);
7185 stmt = gimple_build_switch (vmain, u, label_vec);
7186 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7187 gsi_remove (&switch_si, true);
7189 si = gsi_start_bb (default_bb);
7190 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7191 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7193 if (exit_reachable)
7195 tree bfn_decl;
7197 /* Code to get the next section goes in L1_BB. */
7198 si = gsi_last_bb (l1_bb);
7199 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7201 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7202 stmt = gimple_build_call (bfn_decl, 0);
7203 gimple_call_set_lhs (stmt, vnext);
7204 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7205 gsi_remove (&si, true);
7207 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7210 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7211 si = gsi_last_bb (l2_bb);
7212 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7213 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7214 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7215 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7216 else
7217 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7218 stmt = gimple_build_call (t, 0);
7219 if (gimple_omp_return_lhs (gsi_stmt (si)))
7220 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7221 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7222 gsi_remove (&si, true);
7224 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7228 /* Expand code for an OpenMP single directive. We've already expanded
7229 much of the code; here we simply place the GOMP_barrier call. */
7231 static void
7232 expand_omp_single (struct omp_region *region)
7234 basic_block entry_bb, exit_bb;
7235 gimple_stmt_iterator si;
7237 entry_bb = region->entry;
7238 exit_bb = region->exit;
7240 si = gsi_last_bb (entry_bb);
7241 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7242 gsi_remove (&si, true);
7243 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7245 si = gsi_last_bb (exit_bb);
7246 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7248 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7249 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7251 gsi_remove (&si, true);
7252 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7256 /* Generic expansion for OpenMP synchronization directives: master,
7257 ordered and critical. All we need to do here is remove the entry
7258 and exit markers for REGION. */
7260 static void
7261 expand_omp_synch (struct omp_region *region)
7263 basic_block entry_bb, exit_bb;
7264 gimple_stmt_iterator si;
7266 entry_bb = region->entry;
7267 exit_bb = region->exit;
7269 si = gsi_last_bb (entry_bb);
7270 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7271 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7272 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7273 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7274 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7275 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7276 gsi_remove (&si, true);
7277 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7279 if (exit_bb)
7281 si = gsi_last_bb (exit_bb);
7282 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7283 gsi_remove (&si, true);
7284 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7288 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7289 operation as a normal volatile load. */
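/* For illustration only: for a 4-byte type, the call built below is
   equivalent to this plain C use of the (real) __atomic_load_n builtin;
   MEMMODEL_SEQ_CST/MEMMODEL_RELAXED correspond to the __ATOMIC_* values.  */

static int
example_atomic_read (int *addr, int seq_cst)
{
  return __atomic_load_n (addr, seq_cst ? __ATOMIC_SEQ_CST
					: __ATOMIC_RELAXED);
}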
7291 static bool
7292 expand_omp_atomic_load (basic_block load_bb, tree addr,
7293 tree loaded_val, int index)
7295 enum built_in_function tmpbase;
7296 gimple_stmt_iterator gsi;
7297 basic_block store_bb;
7298 location_t loc;
7299 gimple stmt;
7300 tree decl, call, type, itype;
7302 gsi = gsi_last_bb (load_bb);
7303 stmt = gsi_stmt (gsi);
7304 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7305 loc = gimple_location (stmt);
7307 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7308 is smaller than word size, then expand_atomic_load assumes that the load
7309 is atomic. We could avoid the builtin entirely in this case. */
7311 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7312 decl = builtin_decl_explicit (tmpbase);
7313 if (decl == NULL_TREE)
7314 return false;
7316 type = TREE_TYPE (loaded_val);
7317 itype = TREE_TYPE (TREE_TYPE (decl));
7319 call = build_call_expr_loc (loc, decl, 2, addr,
7320 build_int_cst (NULL,
7321 gimple_omp_atomic_seq_cst_p (stmt)
7322 ? MEMMODEL_SEQ_CST
7323 : MEMMODEL_RELAXED));
7324 if (!useless_type_conversion_p (type, itype))
7325 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7326 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7328 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7329 gsi_remove (&gsi, true);
7331 store_bb = single_succ (load_bb);
7332 gsi = gsi_last_bb (store_bb);
7333 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7334 gsi_remove (&gsi, true);
7336 if (gimple_in_ssa_p (cfun))
7337 update_ssa (TODO_update_ssa_no_phi);
7339 return true;
7342 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7343 operation as a normal volatile store. */
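/* For illustration only: the need-value test below decides between a
   plain atomic store and an exchange.  In terms of the (real) __atomic_*
   builtins, with relaxed ordering for brevity:  */

static int
example_atomic_write (int *addr, int val, int need_old_value)
{
  if (need_old_value)			/* "v = x; x = expr;" capture form */
    return __atomic_exchange_n (addr, val, __ATOMIC_RELAXED);
  __atomic_store_n (addr, val, __ATOMIC_RELAXED);
  return 0;
}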
7345 static bool
7346 expand_omp_atomic_store (basic_block load_bb, tree addr,
7347 tree loaded_val, tree stored_val, int index)
7349 enum built_in_function tmpbase;
7350 gimple_stmt_iterator gsi;
7351 basic_block store_bb = single_succ (load_bb);
7352 location_t loc;
7353 gimple stmt;
7354 tree decl, call, type, itype;
7355 enum machine_mode imode;
7356 bool exchange;
7358 gsi = gsi_last_bb (load_bb);
7359 stmt = gsi_stmt (gsi);
7360 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7362 /* If the load value is needed, then this isn't a store but an exchange. */
7363 exchange = gimple_omp_atomic_need_value_p (stmt);
7365 gsi = gsi_last_bb (store_bb);
7366 stmt = gsi_stmt (gsi);
7367 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7368 loc = gimple_location (stmt);
7370 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7371 is smaller than word size, then expand_atomic_store assumes that the store
7372 is atomic. We could avoid the builtin entirely in this case. */
7374 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7375 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7376 decl = builtin_decl_explicit (tmpbase);
7377 if (decl == NULL_TREE)
7378 return false;
7380 type = TREE_TYPE (stored_val);
7382 /* Dig out the type of the function's second argument. */
7383 itype = TREE_TYPE (decl);
7384 itype = TYPE_ARG_TYPES (itype);
7385 itype = TREE_CHAIN (itype);
7386 itype = TREE_VALUE (itype);
7387 imode = TYPE_MODE (itype);
7389 if (exchange && !can_atomic_exchange_p (imode, true))
7390 return false;
7392 if (!useless_type_conversion_p (itype, type))
7393 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7394 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7395 build_int_cst (NULL,
7396 gimple_omp_atomic_seq_cst_p (stmt)
7397 ? MEMMODEL_SEQ_CST
7398 : MEMMODEL_RELAXED));
7399 if (exchange)
7401 if (!useless_type_conversion_p (type, itype))
7402 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7403 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7406 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7407 gsi_remove (&gsi, true);
7409 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7410 gsi = gsi_last_bb (load_bb);
7411 gsi_remove (&gsi, true);
7413 if (gimple_in_ssa_p (cfun))
7414 update_ssa (TODO_update_ssa_no_phi);
7416 return true;
7419 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7420 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7421 size of the data type, and thus usable to find the index of the builtin
7422 decl. Returns false if the expression is not of the proper form. */
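/* For illustration only: what this matching achieves for the common
   "#pragma omp atomic  *x += y;" case, expressed with the (real)
   __atomic_* builtins; a capture form that needs the new value selects
   the OP_FETCH flavor instead.  */

static long
example_atomic_fetch_op (long *x, long y, int need_new)
{
  if (need_new)
    return __atomic_add_fetch (x, y, __ATOMIC_RELAXED);
  return __atomic_fetch_add (x, y, __ATOMIC_RELAXED);
}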
7424 static bool
7425 expand_omp_atomic_fetch_op (basic_block load_bb,
7426 tree addr, tree loaded_val,
7427 tree stored_val, int index)
7429 enum built_in_function oldbase, newbase, tmpbase;
7430 tree decl, itype, call;
7431 tree lhs, rhs;
7432 basic_block store_bb = single_succ (load_bb);
7433 gimple_stmt_iterator gsi;
7434 gimple stmt;
7435 location_t loc;
7436 enum tree_code code;
7437 bool need_old, need_new;
7438 enum machine_mode imode;
7439 bool seq_cst;
7441 /* We expect to find the following sequences:
7443 load_bb:
7444 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7446 store_bb:
7447 val = tmp OP something; (or: something OP tmp)
7448 GIMPLE_OMP_STORE (val)
7450 ???FIXME: Allow a more flexible sequence.
7451 Perhaps use data flow to pick the statements.
7455 gsi = gsi_after_labels (store_bb);
7456 stmt = gsi_stmt (gsi);
7457 loc = gimple_location (stmt);
7458 if (!is_gimple_assign (stmt))
7459 return false;
7460 gsi_next (&gsi);
7461 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7462 return false;
7463 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7464 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7465 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7466 gcc_checking_assert (!need_old || !need_new);
7468 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7469 return false;
7471 /* Check for one of the supported fetch-op operations. */
7472 code = gimple_assign_rhs_code (stmt);
7473 switch (code)
7475 case PLUS_EXPR:
7476 case POINTER_PLUS_EXPR:
7477 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7478 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7479 break;
7480 case MINUS_EXPR:
7481 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7482 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7483 break;
7484 case BIT_AND_EXPR:
7485 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7486 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7487 break;
7488 case BIT_IOR_EXPR:
7489 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7490 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7491 break;
7492 case BIT_XOR_EXPR:
7493 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7494 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7495 break;
7496 default:
7497 return false;
7500 /* Make sure the expression is of the proper form. */
7501 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7502 rhs = gimple_assign_rhs2 (stmt);
7503 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7504 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7505 rhs = gimple_assign_rhs1 (stmt);
7506 else
7507 return false;
7509 tmpbase = ((enum built_in_function)
7510 ((need_new ? newbase : oldbase) + index + 1));
7511 decl = builtin_decl_explicit (tmpbase);
7512 if (decl == NULL_TREE)
7513 return false;
7514 itype = TREE_TYPE (TREE_TYPE (decl));
7515 imode = TYPE_MODE (itype);
7517 /* We could test all of the various optabs involved, but the fact of the
7518 matter is that (with the exception of i486 vs i586 and xadd) all targets
7519 that support any atomic operation optab also implement compare-and-swap.
7520 Let optabs.c take care of expanding any compare-and-swap loop. */
7521 if (!can_compare_and_swap_p (imode, true))
7522 return false;
7524 gsi = gsi_last_bb (load_bb);
7525 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7527 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7528 It only requires that the operation happen atomically. Thus we can
7529 use the RELAXED memory model. */
7530 call = build_call_expr_loc (loc, decl, 3, addr,
7531 fold_convert_loc (loc, itype, rhs),
7532 build_int_cst (NULL,
7533 seq_cst ? MEMMODEL_SEQ_CST
7534 : MEMMODEL_RELAXED));
7536 if (need_old || need_new)
7538 lhs = need_old ? loaded_val : stored_val;
7539 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7540 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7542 else
7543 call = fold_convert_loc (loc, void_type_node, call);
7544 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7545 gsi_remove (&gsi, true);
7547 gsi = gsi_last_bb (store_bb);
7548 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7549 gsi_remove (&gsi, true);
7550 gsi = gsi_last_bb (store_bb);
7551 gsi_remove (&gsi, true);
7553 if (gimple_in_ssa_p (cfun))
7554 update_ssa (TODO_update_ssa_no_phi);
7556 return true;
7559 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7561 oldval = *addr;
7562 repeat:
7563 newval = rhs; // with oldval replacing *addr in rhs
7564 prev = __sync_val_compare_and_swap (addr, oldval, newval);
7565 if (prev != oldval)
7566 { oldval = prev; goto repeat; }
7568 INDEX is log2 of the size of the data type, and thus usable to find the
7569 index of the builtin decl. */
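/* For instance (illustrative): a "#pragma omp atomic" update of a
   4-byte float has no fetch-op builtin, so it becomes a loop of the
   shape above, with the float VIEW_CONVERT_EXPRed to a 4-byte
   integer and __sync_val_compare_and_swap_4 retried until the
   location is seen unchanged; comparing the integer bits keeps the
   loop terminating even for NaNs and -0.0.  */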
7571 static bool
7572 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7573 tree addr, tree loaded_val, tree stored_val,
7574 int index)
7576 tree loadedi, storedi, initial, new_storedi, old_vali;
7577 tree type, itype, cmpxchg, iaddr;
7578 gimple_stmt_iterator si;
7579 basic_block loop_header = single_succ (load_bb);
7580 gimple phi, stmt;
7581 edge e;
7582 enum built_in_function fncode;
7584 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7585 order to use the RELAXED memory model effectively. */
7586 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7587 + index + 1);
7588 cmpxchg = builtin_decl_explicit (fncode);
7589 if (cmpxchg == NULL_TREE)
7590 return false;
7591 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7592 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7594 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7595 return false;
7597 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7598 si = gsi_last_bb (load_bb);
7599 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7601 /* For floating-point values, we'll need to view-convert them to integers
7602 so that we can perform the atomic compare and swap. Simplify the
7603 following code by always setting up the "i"ntegral variables. */
7604 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7606 tree iaddr_val;
7608 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7609 true), NULL);
7610 iaddr_val
7611 = force_gimple_operand_gsi (&si,
7612 fold_convert (TREE_TYPE (iaddr), addr),
7613 false, NULL_TREE, true, GSI_SAME_STMT);
7614 stmt = gimple_build_assign (iaddr, iaddr_val);
7615 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7616 loadedi = create_tmp_var (itype, NULL);
7617 if (gimple_in_ssa_p (cfun))
7618 loadedi = make_ssa_name (loadedi, NULL);
7620 else
7622 iaddr = addr;
7623 loadedi = loaded_val;
7626 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7627 tree loaddecl = builtin_decl_explicit (fncode);
7628 if (loaddecl)
7629 initial
7630 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
7631 build_call_expr (loaddecl, 2, iaddr,
7632 build_int_cst (NULL_TREE,
7633 MEMMODEL_RELAXED)));
7634 else
7635 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
7636 build_int_cst (TREE_TYPE (iaddr), 0));
7638 initial
7639 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
7640 GSI_SAME_STMT);
7642 /* Move the value to the LOADEDI temporary. */
7643 if (gimple_in_ssa_p (cfun))
7645 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
7646 phi = create_phi_node (loadedi, loop_header);
7647 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
7648 initial);
7650 else
7651 gsi_insert_before (&si,
7652 gimple_build_assign (loadedi, initial),
7653 GSI_SAME_STMT);
7654 if (loadedi != loaded_val)
7656 gimple_stmt_iterator gsi2;
7657 tree x;
7659 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
7660 gsi2 = gsi_start_bb (loop_header);
7661 if (gimple_in_ssa_p (cfun))
7663 gimple stmt;
7664 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7665 true, GSI_SAME_STMT);
7666 stmt = gimple_build_assign (loaded_val, x);
7667 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
7669 else
7671 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
7672 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7673 true, GSI_SAME_STMT);
7676 gsi_remove (&si, true);
7678 si = gsi_last_bb (store_bb);
7679 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7681 if (iaddr == addr)
7682 storedi = stored_val;
7683 else
7684 storedi =
7685 force_gimple_operand_gsi (&si,
7686 build1 (VIEW_CONVERT_EXPR, itype,
7687 stored_val), true, NULL_TREE, true,
7688 GSI_SAME_STMT);
7690 /* Build the compare&swap statement. */
7691 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
7692 new_storedi = force_gimple_operand_gsi (&si,
7693 fold_convert (TREE_TYPE (loadedi),
7694 new_storedi),
7695 true, NULL_TREE,
7696 true, GSI_SAME_STMT);
7698 if (gimple_in_ssa_p (cfun))
7699 old_vali = loadedi;
7700 else
7702 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
7703 stmt = gimple_build_assign (old_vali, loadedi);
7704 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7706 stmt = gimple_build_assign (loadedi, new_storedi);
7707 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7710 /* Note that we always perform the comparison as an integer, even for
7711 floating point. This allows the atomic operation to properly
7712 succeed even with NaNs and -0.0. */
7713 stmt = gimple_build_cond_empty
7714 (build2 (NE_EXPR, boolean_type_node,
7715 new_storedi, old_vali));
7716 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7718 /* Update cfg. */
7719 e = single_succ_edge (store_bb);
7720 e->flags &= ~EDGE_FALLTHRU;
7721 e->flags |= EDGE_FALSE_VALUE;
7723 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
7725 /* Copy the new value to loadedi (we already did that before the condition
7726 if we are not in SSA). */
7727 if (gimple_in_ssa_p (cfun))
7729 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
7730 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
7733 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
7734 gsi_remove (&si, true);
7736 struct loop *loop = alloc_loop ();
7737 loop->header = loop_header;
7738 loop->latch = store_bb;
7739 add_loop (loop, loop_header->loop_father);
7741 if (gimple_in_ssa_p (cfun))
7742 update_ssa (TODO_update_ssa_no_phi);
7744 return true;
7747 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7749 GOMP_atomic_start ();
7750 *addr = rhs;
7751 GOMP_atomic_end ();
7753 The result is not globally atomic, but works so long as all parallel
7754 references are within #pragma omp atomic directives. According to
7755 responses received from omp@openmp.org, this appears to be within spec.
7756 Which makes sense, since that's how several other compilers handle
7757 this situation as well.
7758 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
7759 expanding. STORED_VAL is the operand of the matching
7760 GIMPLE_OMP_ATOMIC_STORE.
7762 We replace
7763 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
7764 loaded_val = *addr;
7766 and replace
7767 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
7768 *addr = stored_val;
7771 static bool
7772 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
7773 tree addr, tree loaded_val, tree stored_val)
7775 gimple_stmt_iterator si;
7776 gimple stmt;
7777 tree t;
7779 si = gsi_last_bb (load_bb);
7780 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7782 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
7783 t = build_call_expr (t, 0);
7784 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7786 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
7787 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7788 gsi_remove (&si, true);
7790 si = gsi_last_bb (store_bb);
7791 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7793 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
7794 stored_val);
7795 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7797 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
7798 t = build_call_expr (t, 0);
7799 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7800 gsi_remove (&si, true);
7802 if (gimple_in_ssa_p (cfun))
7803 update_ssa (TODO_update_ssa_no_phi);
7804 return true;
7807 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
7808 it using expand_omp_atomic_fetch_op. If that fails, we try to
7809 call expand_omp_atomic_pipeline, and if that fails too, the
7810 ultimate fallback is wrapping the operation in a mutex
7811 (expand_omp_atomic_mutex). REGION is the atomic region built
7812 by build_omp_regions_1(). */
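/* For example (a sketch of the dispatch below): "x += 1" on an int
   maps to __atomic_fetch_add_4; "d *= 2.0" on a double has no
   fetch-op form and falls back to the compare-and-swap loop; and a
   type with no usable compare-and-swap at its size and alignment
   ends up bracketed by GOMP_atomic_start/GOMP_atomic_end.  */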
7814 static void
7815 expand_omp_atomic (struct omp_region *region)
7817 basic_block load_bb = region->entry, store_bb = region->exit;
7818 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
7819 tree loaded_val = gimple_omp_atomic_load_lhs (load);
7820 tree addr = gimple_omp_atomic_load_rhs (load);
7821 tree stored_val = gimple_omp_atomic_store_val (store);
7822 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7823 HOST_WIDE_INT index;
7825 /* Make sure the type is one of the supported sizes. */
7826 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
7827 index = exact_log2 (index);
7828 if (index >= 0 && index <= 4)
7830 unsigned int align = TYPE_ALIGN_UNIT (type);
7832 /* __sync builtins require strict data alignment. */
7833 if (exact_log2 (align) >= index)
7835 /* Atomic load. */
7836 if (loaded_val == stored_val
7837 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7838 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7839 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7840 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
7841 return;
7843 /* Atomic store. */
7844 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7845 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7846 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7847 && store_bb == single_succ (load_bb)
7848 && first_stmt (store_bb) == store
7849 && expand_omp_atomic_store (load_bb, addr, loaded_val,
7850 stored_val, index))
7851 return;
7853 /* When possible, use specialized atomic update functions. */
7854 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
7855 && store_bb == single_succ (load_bb)
7856 && expand_omp_atomic_fetch_op (load_bb, addr,
7857 loaded_val, stored_val, index))
7858 return;
7860 /* If we don't have specialized __sync builtins, try to implement
7861 it as a compare-and-swap loop. */
7862 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
7863 loaded_val, stored_val, index))
7864 return;
7868 /* The ultimate fallback is wrapping the operation in a mutex. */
7869 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
7873 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
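/* In outline (illustrative; the precise arguments are assembled
   below): a "#pragma omp target" body is outlined into CHILD_FN and
   the directive is replaced by a call such as

     GOMP_target (device, child_fn, openmp_target,
                  mapnum, hostaddrs, sizes, kinds);

   while "target data" and "target update" emit GOMP_target_data /
   GOMP_target_update and keep the code in the current function.  */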
7875 static void
7876 expand_omp_target (struct omp_region *region)
7878 basic_block entry_bb, exit_bb, new_bb;
7879 struct function *child_cfun = NULL;
7880 tree child_fn = NULL_TREE, block, t;
7881 gimple_stmt_iterator gsi;
7882 gimple entry_stmt, stmt;
7883 edge e;
7885 entry_stmt = last_stmt (region->entry);
7886 new_bb = region->entry;
7887 int kind = gimple_omp_target_kind (entry_stmt);
7888 if (kind == GF_OMP_TARGET_KIND_REGION)
7890 child_fn = gimple_omp_target_child_fn (entry_stmt);
7891 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7894 entry_bb = region->entry;
7895 exit_bb = region->exit;
7897 if (kind == GF_OMP_TARGET_KIND_REGION)
7899 unsigned srcidx, dstidx, num;
7901 /* If the target region needs data sent from the parent
7902 function, then the very first statement (except possible
7903 tree profile counter updates) of the target body
7904 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
7905 &.OMP_DATA_O is passed as an argument to the child function,
7906 we need to replace it with the argument as seen by the child
7907 function.
7909 In most cases, this will end up being the identity assignment
7910 .OMP_DATA_I = .OMP_DATA_I. However, if the target body had
7911 a function call that has been inlined, the original PARM_DECL
7912 .OMP_DATA_I may have been converted into a different local
7913 variable. In which case, we need to keep the assignment. */
7914 if (gimple_omp_target_data_arg (entry_stmt))
7916 basic_block entry_succ_bb = single_succ (entry_bb);
7917 gimple_stmt_iterator gsi;
7918 tree arg;
7919 gimple tgtcopy_stmt = NULL;
7920 tree sender
7921 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
7923 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
7925 gcc_assert (!gsi_end_p (gsi));
7926 stmt = gsi_stmt (gsi);
7927 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7928 continue;
7930 if (gimple_num_ops (stmt) == 2)
7932 tree arg = gimple_assign_rhs1 (stmt);
7934 /* We're ignoring the subcode because we're
7935 effectively doing a STRIP_NOPS. */
7937 if (TREE_CODE (arg) == ADDR_EXPR
7938 && TREE_OPERAND (arg, 0) == sender)
7940 tgtcopy_stmt = stmt;
7941 break;
7946 gcc_assert (tgtcopy_stmt != NULL);
7947 arg = DECL_ARGUMENTS (child_fn);
7949 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
7950 gsi_remove (&gsi, true);
7953 /* Declare local variables needed in CHILD_CFUN. */
7954 block = DECL_INITIAL (child_fn);
7955 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
7956 /* The gimplifier could record temporaries in the target block
7957 rather than in the containing function's local_decls chain,
7958 which would mean cgraph missed finalizing them. Do it now. */
7959 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
7960 if (TREE_CODE (t) == VAR_DECL
7961 && TREE_STATIC (t)
7962 && !DECL_EXTERNAL (t))
7963 varpool_finalize_decl (t);
7964 DECL_SAVED_TREE (child_fn) = NULL;
7965 /* We'll create a CFG for child_fn, so no gimple body is needed. */
7966 gimple_set_body (child_fn, NULL);
7967 TREE_USED (block) = 1;
7969 /* Reset DECL_CONTEXT on function arguments. */
7970 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7971 DECL_CONTEXT (t) = child_fn;
7973 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
7974 so that it can be moved to the child function. */
7975 gsi = gsi_last_bb (entry_bb);
7976 stmt = gsi_stmt (gsi);
7977 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
7978 && gimple_omp_target_kind (stmt)
7979 == GF_OMP_TARGET_KIND_REGION);
7980 gsi_remove (&gsi, true);
7981 e = split_block (entry_bb, stmt);
7982 entry_bb = e->dest;
7983 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7985 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
7986 if (exit_bb)
7988 gsi = gsi_last_bb (exit_bb);
7989 gcc_assert (!gsi_end_p (gsi)
7990 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7991 stmt = gimple_build_return (NULL);
7992 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7993 gsi_remove (&gsi, true);
7996 /* Move the target region into CHILD_CFUN. */
7998 block = gimple_block (entry_stmt);
8000 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8001 if (exit_bb)
8002 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8003 /* When the OMP expansion process cannot guarantee an up-to-date
8004 loop tree, arrange for the child function to fix up loops. */
8005 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8006 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8008 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8009 num = vec_safe_length (child_cfun->local_decls);
8010 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8012 t = (*child_cfun->local_decls)[srcidx];
8013 if (DECL_CONTEXT (t) == cfun->decl)
8014 continue;
8015 if (srcidx != dstidx)
8016 (*child_cfun->local_decls)[dstidx] = t;
8017 dstidx++;
8019 if (dstidx != num)
8020 vec_safe_truncate (child_cfun->local_decls, dstidx);
8022 /* Inform the callgraph about the new function. */
8023 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8024 cgraph_add_new_function (child_fn, true);
8026 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8027 fixed in a following pass. */
8028 push_cfun (child_cfun);
8029 rebuild_cgraph_edges ();
8031 /* Some EH regions might become dead, see PR34608. If
8032 pass_cleanup_cfg isn't the first pass to happen with the
8033 new child, these dead EH edges might cause problems.
8034 Clean them up now. */
8035 if (flag_exceptions)
8037 basic_block bb;
8038 bool changed = false;
8040 FOR_EACH_BB_FN (bb, cfun)
8041 changed |= gimple_purge_dead_eh_edges (bb);
8042 if (changed)
8043 cleanup_tree_cfg ();
8045 pop_cfun ();
8048 /* Emit a library call to launch the target region, or do data
8049 transfers. */
8050 tree t1, t2, t3, t4, device, cond, c, clauses;
8051 enum built_in_function start_ix;
8052 location_t clause_loc;
8054 clauses = gimple_omp_target_clauses (entry_stmt);
8056 if (kind == GF_OMP_TARGET_KIND_REGION)
8057 start_ix = BUILT_IN_GOMP_TARGET;
8058 else if (kind == GF_OMP_TARGET_KIND_DATA)
8059 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8060 else
8061 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8063 /* By default, the value of DEVICE is -1 (let the runtime library choose)
8064 and there is no conditional. */
8065 cond = NULL_TREE;
8066 device = build_int_cst (integer_type_node, -1);
8068 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8069 if (c)
8070 cond = OMP_CLAUSE_IF_EXPR (c);
8072 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8073 if (c)
8075 device = OMP_CLAUSE_DEVICE_ID (c);
8076 clause_loc = OMP_CLAUSE_LOCATION (c);
8078 else
8079 clause_loc = gimple_location (entry_stmt);
8081 /* Ensure 'device' is of the correct type. */
8082 device = fold_convert_loc (clause_loc, integer_type_node, device);
8084 /* If we found the clause 'if (cond)', build
8085 (cond ? device : -2). */
8086 if (cond)
8088 cond = gimple_boolify (cond);
8090 basic_block cond_bb, then_bb, else_bb;
8091 edge e;
8092 tree tmp_var;
8094 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
8095 if (kind != GF_OMP_TARGET_KIND_REGION)
8097 gsi = gsi_last_bb (new_bb);
8098 gsi_prev (&gsi);
8099 e = split_block (new_bb, gsi_stmt (gsi));
8101 else
8102 e = split_block (new_bb, NULL);
8103 cond_bb = e->src;
8104 new_bb = e->dest;
8105 remove_edge (e);
8107 then_bb = create_empty_bb (cond_bb);
8108 else_bb = create_empty_bb (then_bb);
8109 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8110 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8112 stmt = gimple_build_cond_empty (cond);
8113 gsi = gsi_last_bb (cond_bb);
8114 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8116 gsi = gsi_start_bb (then_bb);
8117 stmt = gimple_build_assign (tmp_var, device);
8118 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8120 gsi = gsi_start_bb (else_bb);
8121 stmt = gimple_build_assign (tmp_var,
8122 build_int_cst (integer_type_node, -2));
8123 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8125 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8126 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8127 add_bb_to_loop (then_bb, cond_bb->loop_father);
8128 add_bb_to_loop (else_bb, cond_bb->loop_father);
8129 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8130 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8132 device = tmp_var;
8135 gsi = gsi_last_bb (new_bb);
8136 t = gimple_omp_target_data_arg (entry_stmt);
8137 if (t == NULL)
8139 t1 = size_zero_node;
8140 t2 = build_zero_cst (ptr_type_node);
8141 t3 = t2;
8142 t4 = t2;
8144 else
8146 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8147 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8148 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8149 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8150 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8153 gimple g;
8154 /* FIXME: This will be the address of the
8155 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8156 symbol, as soon as the linker plugin is able to create it for us. */
8157 tree openmp_target = build_zero_cst (ptr_type_node);
8158 if (kind == GF_OMP_TARGET_KIND_REGION)
8160 tree fnaddr = build_fold_addr_expr (child_fn);
8161 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8162 device, fnaddr, openmp_target, t1, t2, t3, t4);
8164 else
8165 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8166 device, openmp_target, t1, t2, t3, t4);
8167 gimple_set_location (g, gimple_location (entry_stmt));
8168 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8169 if (kind != GF_OMP_TARGET_KIND_REGION)
8171 g = gsi_stmt (gsi);
8172 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8173 gsi_remove (&gsi, true);
8175 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8177 gsi = gsi_last_bb (region->exit);
8178 g = gsi_stmt (gsi);
8179 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8180 gsi_remove (&gsi, true);
8185 /* Expand the parallel region tree rooted at REGION. Expansion
8186 proceeds in depth-first order. Innermost regions are expanded
8187 first. This way, parallel regions that require a new function to
8188 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8189 internal dependencies in their body. */
8191 static void
8192 expand_omp (struct omp_region *region)
8194 while (region)
8196 location_t saved_location;
8197 gimple inner_stmt = NULL;
8199 /* First, determine whether this is a combined parallel+workshare
8200 region. */
8201 if (region->type == GIMPLE_OMP_PARALLEL)
8202 determine_parallel_type (region);
8204 if (region->type == GIMPLE_OMP_FOR
8205 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8206 inner_stmt = last_stmt (region->inner->entry);
8208 if (region->inner)
8209 expand_omp (region->inner);
8211 saved_location = input_location;
8212 if (gimple_has_location (last_stmt (region->entry)))
8213 input_location = gimple_location (last_stmt (region->entry));
8215 switch (region->type)
8217 case GIMPLE_OMP_PARALLEL:
8218 case GIMPLE_OMP_TASK:
8219 expand_omp_taskreg (region);
8220 break;
8222 case GIMPLE_OMP_FOR:
8223 expand_omp_for (region, inner_stmt);
8224 break;
8226 case GIMPLE_OMP_SECTIONS:
8227 expand_omp_sections (region);
8228 break;
8230 case GIMPLE_OMP_SECTION:
8231 /* Individual omp sections are handled together with their
8232 parent GIMPLE_OMP_SECTIONS region. */
8233 break;
8235 case GIMPLE_OMP_SINGLE:
8236 expand_omp_single (region);
8237 break;
8239 case GIMPLE_OMP_MASTER:
8240 case GIMPLE_OMP_TASKGROUP:
8241 case GIMPLE_OMP_ORDERED:
8242 case GIMPLE_OMP_CRITICAL:
8243 case GIMPLE_OMP_TEAMS:
8244 expand_omp_synch (region);
8245 break;
8247 case GIMPLE_OMP_ATOMIC_LOAD:
8248 expand_omp_atomic (region);
8249 break;
8251 case GIMPLE_OMP_TARGET:
8252 expand_omp_target (region);
8253 break;
8255 default:
8256 gcc_unreachable ();
8259 input_location = saved_location;
8260 region = region->next;
8265 /* Helper for build_omp_regions. Scan the dominator tree starting at
8266 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8267 true, the function ends once a single tree is built (otherwise, the
8268 whole forest of OMP constructs may be built). */
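/* As an example (illustrative), for

     #pragma omp parallel
       #pragma omp for
         ...

   the walk below creates a GIMPLE_OMP_PARALLEL region whose 'inner'
   field is the GIMPLE_OMP_FOR region, and the block holding each
   construct's GIMPLE_OMP_RETURN becomes that region's 'exit'.  */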
8270 static void
8271 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8272 bool single_tree)
8274 gimple_stmt_iterator gsi;
8275 gimple stmt;
8276 basic_block son;
8278 gsi = gsi_last_bb (bb);
8279 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8281 struct omp_region *region;
8282 enum gimple_code code;
8284 stmt = gsi_stmt (gsi);
8285 code = gimple_code (stmt);
8286 if (code == GIMPLE_OMP_RETURN)
8288 /* STMT is the return point out of region PARENT. Mark it
8289 as the exit point and make PARENT the immediately
8290 enclosing region. */
8291 gcc_assert (parent);
8292 region = parent;
8293 region->exit = bb;
8294 parent = parent->outer;
8296 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8298 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8299 GIMPLE_OMP_RETURN, but matches with
8300 GIMPLE_OMP_ATOMIC_LOAD. */
8301 gcc_assert (parent);
8302 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8303 region = parent;
8304 region->exit = bb;
8305 parent = parent->outer;
8308 else if (code == GIMPLE_OMP_CONTINUE)
8310 gcc_assert (parent);
8311 parent->cont = bb;
8313 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8315 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8316 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8319 else if (code == GIMPLE_OMP_TARGET
8320 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8321 new_omp_region (bb, code, parent);
8322 else
8324 /* Otherwise, this directive becomes the parent for a new
8325 region. */
8326 region = new_omp_region (bb, code, parent);
8327 parent = region;
8331 if (single_tree && !parent)
8332 return;
8334 for (son = first_dom_son (CDI_DOMINATORS, bb);
8335 son;
8336 son = next_dom_son (CDI_DOMINATORS, son))
8337 build_omp_regions_1 (son, parent, single_tree);
8340 /* Builds the tree of OMP regions rooted at ROOT, storing it to
8341 root_omp_region. */
8343 static void
8344 build_omp_regions_root (basic_block root)
8346 gcc_assert (root_omp_region == NULL);
8347 build_omp_regions_1 (root, NULL, true);
8348 gcc_assert (root_omp_region != NULL);
8351 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
8353 void
8354 omp_expand_local (basic_block head)
8356 build_omp_regions_root (head);
8357 if (dump_file && (dump_flags & TDF_DETAILS))
8359 fprintf (dump_file, "\nOMP region tree\n\n");
8360 dump_omp_region (dump_file, root_omp_region, 0);
8361 fprintf (dump_file, "\n");
8364 remove_exit_barriers (root_omp_region);
8365 expand_omp (root_omp_region);
8367 free_omp_regions ();
8370 /* Scan the CFG and build a tree of OMP regions, storing the root of
8371 the tree in root_omp_region. */
8373 static void
8374 build_omp_regions (void)
8376 gcc_assert (root_omp_region == NULL);
8377 calculate_dominance_info (CDI_DOMINATORS);
8378 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8381 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8383 static unsigned int
8384 execute_expand_omp (void)
8386 build_omp_regions ();
8388 if (!root_omp_region)
8389 return 0;
8391 if (dump_file)
8393 fprintf (dump_file, "\nOMP region tree\n\n");
8394 dump_omp_region (dump_file, root_omp_region, 0);
8395 fprintf (dump_file, "\n");
8398 remove_exit_barriers (root_omp_region);
8400 expand_omp (root_omp_region);
8402 cleanup_tree_cfg ();
8404 free_omp_regions ();
8406 return 0;
8409 /* OMP expansion -- the default pass, run before creation of SSA form. */
8411 namespace {
8413 const pass_data pass_data_expand_omp =
8415 GIMPLE_PASS, /* type */
8416 "ompexp", /* name */
8417 OPTGROUP_NONE, /* optinfo_flags */
8418 true, /* has_execute */
8419 TV_NONE, /* tv_id */
8420 PROP_gimple_any, /* properties_required */
8421 0, /* properties_provided */
8422 0, /* properties_destroyed */
8423 0, /* todo_flags_start */
8424 0, /* todo_flags_finish */
8427 class pass_expand_omp : public gimple_opt_pass
8429 public:
8430 pass_expand_omp (gcc::context *ctxt)
8431 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8434 /* opt_pass methods: */
8435 virtual bool gate (function *)
8437 return ((flag_openmp != 0 || flag_openmp_simd != 0
8438 || flag_cilkplus != 0) && !seen_error ());
8441 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8443 }; // class pass_expand_omp
8445 } // anon namespace
8447 gimple_opt_pass *
8448 make_pass_expand_omp (gcc::context *ctxt)
8450 return new pass_expand_omp (ctxt);
8453 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8455 /* If ctx is a worksharing context inside of a cancellable parallel
8456 region and it isn't nowait, add an lhs to its GIMPLE_OMP_RETURN
8457 and a conditional branch to the parallel's cancel_label to handle
8458 cancellation in the implicit barrier. */
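/* The appended tail is roughly (a sketch; LHS is the temporary
   created below):

     if (lhs != 0) goto <parallel's cancel_label>;
     <fallthru_label>:

   where LHS receives the cancellation flag from the barrier that
   the GIMPLE_OMP_RETURN expands to.  */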
8460 static void
8461 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8463 gimple omp_return = gimple_seq_last_stmt (*body);
8464 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8465 if (gimple_omp_return_nowait_p (omp_return))
8466 return;
8467 if (ctx->outer
8468 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8469 && ctx->outer->cancellable)
8471 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8472 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8473 tree lhs = create_tmp_var (c_bool_type, NULL);
8474 gimple_omp_return_set_lhs (omp_return, lhs);
8475 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8476 gimple g = gimple_build_cond (NE_EXPR, lhs,
8477 fold_convert (c_bool_type,
8478 boolean_false_node),
8479 ctx->outer->cancel_label, fallthru_label);
8480 gimple_seq_add_stmt (body, g);
8481 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8485 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8486 CTX is the enclosing OMP context for the current statement. */
8488 static void
8489 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8491 tree block, control;
8492 gimple_stmt_iterator tgsi;
8493 gimple stmt, new_stmt, bind, t;
8494 gimple_seq ilist, dlist, olist, new_body;
8496 stmt = gsi_stmt (*gsi_p);
8498 push_gimplify_context ();
8500 dlist = NULL;
8501 ilist = NULL;
8502 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8503 &ilist, &dlist, ctx, NULL);
8505 new_body = gimple_omp_body (stmt);
8506 gimple_omp_set_body (stmt, NULL);
8507 tgsi = gsi_start (new_body);
8508 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8510 omp_context *sctx;
8511 gimple sec_start;
8513 sec_start = gsi_stmt (tgsi);
8514 sctx = maybe_lookup_ctx (sec_start);
8515 gcc_assert (sctx);
8517 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8518 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8519 GSI_CONTINUE_LINKING);
8520 gimple_omp_set_body (sec_start, NULL);
8522 if (gsi_one_before_end_p (tgsi))
8524 gimple_seq l = NULL;
8525 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8526 &l, ctx);
8527 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8528 gimple_omp_section_set_last (sec_start);
8531 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8532 GSI_CONTINUE_LINKING);
8535 block = make_node (BLOCK);
8536 bind = gimple_build_bind (NULL, new_body, block);
8538 olist = NULL;
8539 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8541 block = make_node (BLOCK);
8542 new_stmt = gimple_build_bind (NULL, NULL, block);
8543 gsi_replace (gsi_p, new_stmt, true);
8545 pop_gimplify_context (new_stmt);
8546 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8547 BLOCK_VARS (block) = gimple_bind_vars (bind);
8548 if (BLOCK_VARS (block))
8549 TREE_USED (block) = 1;
8551 new_body = NULL;
8552 gimple_seq_add_seq (&new_body, ilist);
8553 gimple_seq_add_stmt (&new_body, stmt);
8554 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8555 gimple_seq_add_stmt (&new_body, bind);
8557 control = create_tmp_var (unsigned_type_node, ".section");
8558 t = gimple_build_omp_continue (control, control);
8559 gimple_omp_sections_set_control (stmt, control);
8560 gimple_seq_add_stmt (&new_body, t);
8562 gimple_seq_add_seq (&new_body, olist);
8563 if (ctx->cancellable)
8564 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8565 gimple_seq_add_seq (&new_body, dlist);
8567 new_body = maybe_catch_exception (new_body);
8569 t = gimple_build_omp_return
8570 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8571 OMP_CLAUSE_NOWAIT));
8572 gimple_seq_add_stmt (&new_body, t);
8573 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8575 gimple_bind_set_body (new_stmt, new_body);
8579 /* A subroutine of lower_omp_single. Expand the simple form of
8580 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8582 if (GOMP_single_start ())
8583 BODY;
8584 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8586 FIXME. It may be better to delay expanding the logic of this until
8587 pass_expand_omp. The expanded logic may make the job more difficult
8588 for a synchronization analysis pass. */
8590 static void
8591 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
8593 location_t loc = gimple_location (single_stmt);
8594 tree tlabel = create_artificial_label (loc);
8595 tree flabel = create_artificial_label (loc);
8596 gimple call, cond;
8597 tree lhs, decl;
8599 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8600 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8601 call = gimple_build_call (decl, 0);
8602 gimple_call_set_lhs (call, lhs);
8603 gimple_seq_add_stmt (pre_p, call);
8605 cond = gimple_build_cond (EQ_EXPR, lhs,
8606 fold_convert_loc (loc, TREE_TYPE (lhs),
8607 boolean_true_node),
8608 tlabel, flabel);
8609 gimple_seq_add_stmt (pre_p, cond);
8610 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
8611 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8612 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
8616 /* A subroutine of lower_omp_single. Expand the form of
8617 a GIMPLE_OMP_SINGLE with a copyprivate clause:
8619 #pragma omp single copyprivate (a, b, c)
8621 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
8624 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
8626 BODY;
8627 copyout.a = a;
8628 copyout.b = b;
8629 copyout.c = c;
8630 GOMP_single_copy_end (&copyout);
8632 else
8634 a = copyout_p->a;
8635 b = copyout_p->b;
8636 c = copyout_p->c;
8638 GOMP_barrier ();
8641 FIXME. It may be better to delay expanding the logic of this until
8642 pass_expand_omp. The expanded logic may make the job more difficult
8643 for a synchronization analysis pass. */
8645 static void
8646 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
8648 tree ptr_type, t, l0, l1, l2, bfn_decl;
8649 gimple_seq copyin_seq;
8650 location_t loc = gimple_location (single_stmt);
8652 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
8654 ptr_type = build_pointer_type (ctx->record_type);
8655 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
8657 l0 = create_artificial_label (loc);
8658 l1 = create_artificial_label (loc);
8659 l2 = create_artificial_label (loc);
8661 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
8662 t = build_call_expr_loc (loc, bfn_decl, 0);
8663 t = fold_convert_loc (loc, ptr_type, t);
8664 gimplify_assign (ctx->receiver_decl, t, pre_p);
8666 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
8667 build_int_cst (ptr_type, 0));
8668 t = build3 (COND_EXPR, void_type_node, t,
8669 build_and_jump (&l0), build_and_jump (&l1));
8670 gimplify_and_add (t, pre_p);
8672 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
8674 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8676 copyin_seq = NULL;
8677 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
8678 &copyin_seq, ctx);
8680 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
8681 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
8682 t = build_call_expr_loc (loc, bfn_decl, 1, t);
8683 gimplify_and_add (t, pre_p);
8685 t = build_and_jump (&l2);
8686 gimplify_and_add (t, pre_p);
8688 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
8690 gimple_seq_add_seq (pre_p, copyin_seq);
8692 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
8696 /* Expand code for an OpenMP single directive. */
8698 static void
8699 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8701 tree block;
8702 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
8703 gimple_seq bind_body, bind_body_tail = NULL, dlist;
8705 push_gimplify_context ();
8707 block = make_node (BLOCK);
8708 bind = gimple_build_bind (NULL, NULL, block);
8709 gsi_replace (gsi_p, bind, true);
8710 bind_body = NULL;
8711 dlist = NULL;
8712 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
8713 &bind_body, &dlist, ctx, NULL);
8714 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
8716 gimple_seq_add_stmt (&bind_body, single_stmt);
8718 if (ctx->record_type)
8719 lower_omp_single_copy (single_stmt, &bind_body, ctx);
8720 else
8721 lower_omp_single_simple (single_stmt, &bind_body);
8723 gimple_omp_set_body (single_stmt, NULL);
8725 gimple_seq_add_seq (&bind_body, dlist);
8727 bind_body = maybe_catch_exception (bind_body);
8729 t = gimple_build_omp_return
8730 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
8731 OMP_CLAUSE_NOWAIT));
8732 gimple_seq_add_stmt (&bind_body_tail, t);
8733 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
8734 if (ctx->record_type)
8736 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
8737 tree clobber = build_constructor (ctx->record_type, NULL);
8738 TREE_THIS_VOLATILE (clobber) = 1;
8739 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
8740 clobber), GSI_SAME_STMT);
8742 gimple_seq_add_seq (&bind_body, bind_body_tail);
8743 gimple_bind_set_body (bind, bind_body);
8745 pop_gimplify_context (bind);
8747 gimple_bind_append_vars (bind, ctx->block_vars);
8748 BLOCK_VARS (block) = ctx->block_vars;
8749 if (BLOCK_VARS (block))
8750 TREE_USED (block) = 1;
8754 /* Expand code for an OpenMP master directive. */
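/* The lowered form is roughly (illustrative):

     if (omp_get_thread_num () == 0)
       <body>
     lab:

   i.e. only the master thread runs the body, and no barrier is
   implied (the GIMPLE_OMP_RETURN below is built with nowait).  */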
8756 static void
8757 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8759 tree block, lab = NULL, x, bfn_decl;
8760 gimple stmt = gsi_stmt (*gsi_p), bind;
8761 location_t loc = gimple_location (stmt);
8762 gimple_seq tseq;
8764 push_gimplify_context ();
8766 block = make_node (BLOCK);
8767 bind = gimple_build_bind (NULL, NULL, block);
8768 gsi_replace (gsi_p, bind, true);
8769 gimple_bind_add_stmt (bind, stmt);
8771 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
8772 x = build_call_expr_loc (loc, bfn_decl, 0);
8773 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
8774 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
8775 tseq = NULL;
8776 gimplify_and_add (x, &tseq);
8777 gimple_bind_add_seq (bind, tseq);
8779 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8780 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8781 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8782 gimple_omp_set_body (stmt, NULL);
8784 gimple_bind_add_stmt (bind, gimple_build_label (lab));
8786 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8788 pop_gimplify_context (bind);
8790 gimple_bind_append_vars (bind, ctx->block_vars);
8791 BLOCK_VARS (block) = ctx->block_vars;
8795 /* Expand code for an OpenMP taskgroup directive. */
8797 static void
8798 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8800 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8801 tree block = make_node (BLOCK);
8803 bind = gimple_build_bind (NULL, NULL, block);
8804 gsi_replace (gsi_p, bind, true);
8805 gimple_bind_add_stmt (bind, stmt);
8807 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
8809 gimple_bind_add_stmt (bind, x);
8811 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8812 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8813 gimple_omp_set_body (stmt, NULL);
8815 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8817 gimple_bind_append_vars (bind, ctx->block_vars);
8818 BLOCK_VARS (block) = ctx->block_vars;
8822 /* Expand code for an OpenMP ordered directive. */
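/* The lowered form is simply (illustrative):

     GOMP_ordered_start ();
     <body>
     GOMP_ordered_end ();

   with libgomp releasing the threads in iteration order.  */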
8824 static void
8825 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8827 tree block;
8828 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8830 push_gimplify_context ();
8832 block = make_node (BLOCK);
8833 bind = gimple_build_bind (NULL, NULL, block);
8834 gsi_replace (gsi_p, bind, true);
8835 gimple_bind_add_stmt (bind, stmt);
8837 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
8839 gimple_bind_add_stmt (bind, x);
8841 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8842 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8843 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8844 gimple_omp_set_body (stmt, NULL);
8846 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
8847 gimple_bind_add_stmt (bind, x);
8849 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8851 pop_gimplify_context (bind);
8853 gimple_bind_append_vars (bind, ctx->block_vars);
8854 BLOCK_VARS (block) = gimple_bind_vars (bind);
8858 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
8859 substitution of a couple of function calls. But in the NAMED case,
8860 it requires that the languages coordinate a symbol name. It is therefore
8861 best put here in common code. */
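/* For example (a sketch): "#pragma omp critical (lck)" becomes

     GOMP_critical_name_start (&.gomp_critical_user_lck);
     <body>
     GOMP_critical_name_end (&.gomp_critical_user_lck);

   where .gomp_critical_user_lck is the common symbol created below,
   so all translation units using the same name share one mutex.  */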
8863 static GTY((param1_is (tree), param2_is (tree)))
8864 splay_tree critical_name_mutexes;
8866 static void
8867 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8869 tree block;
8870 tree name, lock, unlock;
8871 gimple stmt = gsi_stmt (*gsi_p), bind;
8872 location_t loc = gimple_location (stmt);
8873 gimple_seq tbody;
8875 name = gimple_omp_critical_name (stmt);
8876 if (name)
8878 tree decl;
8879 splay_tree_node n;
8881 if (!critical_name_mutexes)
8882 critical_name_mutexes
8883 = splay_tree_new_ggc (splay_tree_compare_pointers,
8884 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
8885 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
8887 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
8888 if (n == NULL)
8890 char *new_str;
8892 decl = create_tmp_var_raw (ptr_type_node, NULL);
8894 new_str = ACONCAT ((".gomp_critical_user_",
8895 IDENTIFIER_POINTER (name), NULL));
8896 DECL_NAME (decl) = get_identifier (new_str);
8897 TREE_PUBLIC (decl) = 1;
8898 TREE_STATIC (decl) = 1;
8899 DECL_COMMON (decl) = 1;
8900 DECL_ARTIFICIAL (decl) = 1;
8901 DECL_IGNORED_P (decl) = 1;
8902 varpool_finalize_decl (decl);
8904 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
8905 (splay_tree_value) decl);
8907 else
8908 decl = (tree) n->value;
8910 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
8911 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
8913 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
8914 unlock = build_call_expr_loc (loc, unlock, 1,
8915 build_fold_addr_expr_loc (loc, decl));
8917 else
8919 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
8920 lock = build_call_expr_loc (loc, lock, 0);
8922 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
8923 unlock = build_call_expr_loc (loc, unlock, 0);
8926 push_gimplify_context ();
8928 block = make_node (BLOCK);
8929 bind = gimple_build_bind (NULL, NULL, block);
8930 gsi_replace (gsi_p, bind, true);
8931 gimple_bind_add_stmt (bind, stmt);
8933 tbody = gimple_bind_body (bind);
8934 gimplify_and_add (lock, &tbody);
8935 gimple_bind_set_body (bind, tbody);
8937 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8938 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8939 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8940 gimple_omp_set_body (stmt, NULL);
8942 tbody = gimple_bind_body (bind);
8943 gimplify_and_add (unlock, &tbody);
8944 gimple_bind_set_body (bind, tbody);
8946 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8948 pop_gimplify_context (bind);
8949 gimple_bind_append_vars (bind, ctx->block_vars);
8950 BLOCK_VARS (block) = gimple_bind_vars (bind);
8954 /* A subroutine of lower_omp_for. Generate code to emit the predicate
8955 for a lastprivate clause. Given a loop control predicate of (V
8956 cond N2), we gate the clause on (!(V cond N2)). The lowered form
8957 is appended to *DLIST, and iterator initialization is appended to
8958 *BODY_P. */
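/* For instance (illustrative): given

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++) ...

   the copy-out of X is gated on (i >= n), or on the cheaper
   (i == n) when the step is known to be 1 or -1.  */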
8960 static void
8961 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
8962 gimple_seq *dlist, struct omp_context *ctx)
8964 tree clauses, cond, vinit;
8965 enum tree_code cond_code;
8966 gimple_seq stmts;
8968 cond_code = fd->loop.cond_code;
8969 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
8971 /* When possible, use a strict equality expression. This can let VRP
8972 type optimizations deduce the value and remove a copy. */
8973 if (tree_fits_shwi_p (fd->loop.step))
8975 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
8976 if (step == 1 || step == -1)
8977 cond_code = EQ_EXPR;
8980 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
8982 clauses = gimple_omp_for_clauses (fd->for_stmt);
8983 stmts = NULL;
8984 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
8985 if (!gimple_seq_empty_p (stmts))
8987 gimple_seq_add_seq (&stmts, *dlist);
8988 *dlist = stmts;
8990 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
8991 vinit = fd->loop.n1;
8992 if (cond_code == EQ_EXPR
8993 && tree_fits_shwi_p (fd->loop.n2)
8994 && ! integer_zerop (fd->loop.n2))
8995 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
8996 else
8997 vinit = unshare_expr (vinit);
8999 /* Initialize the iterator variable, so that threads that don't execute
9000 any iterations don't execute the lastprivate clauses by accident. */
9001 gimplify_assign (fd->loop.v, vinit, body_p);
9006 /* Lower code for an OpenMP loop directive. */
9008 static void
9009 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9011 tree *rhs_p, block;
9012 struct omp_for_data fd, *fdp = NULL;
9013 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
9014 gimple_seq omp_for_body, body, dlist;
9015 size_t i;
9017 push_gimplify_context ();
9019 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
9021 block = make_node (BLOCK);
9022 new_stmt = gimple_build_bind (NULL, NULL, block);
9023 /* Replace at gsi right away, so that 'stmt' is no longer a member
9024 of a sequence, as we're going to add it to a different
9025 one below. */
9026 gsi_replace (gsi_p, new_stmt, true);
9028 /* Move declaration of temporaries in the loop body before we make
9029 it go away. */
9030 omp_for_body = gimple_omp_body (stmt);
9031 if (!gimple_seq_empty_p (omp_for_body)
9032 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
9034 gimple inner_bind = gimple_seq_first_stmt (omp_for_body);
9035 tree vars = gimple_bind_vars (inner_bind);
9036 gimple_bind_append_vars (new_stmt, vars);
9037 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, so don't
9038 keep them on the inner_bind and its block. */
9039 gimple_bind_set_vars (inner_bind, NULL_TREE);
9040 if (gimple_bind_block (inner_bind))
9041 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
9044 if (gimple_omp_for_combined_into_p (stmt))
9046 extract_omp_for_data (stmt, &fd, NULL);
9047 fdp = &fd;
9049 /* We need two temporaries with fd.loop.v type (istart/iend)
9050 and then (fd.collapse - 1) temporaries with the same
9051 type for count2 ... countN-1 vars if not constant. */
9052 size_t count = 2;
9053 tree type = fd.iter_type;
9054 if (fd.collapse > 1
9055 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
9056 count += fd.collapse - 1;
9057 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
9058 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
9059 tree clauses = *pc;
9060 if (parallel_for)
9061 outerc
9062 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
9063 OMP_CLAUSE__LOOPTEMP_);
9064 for (i = 0; i < count; i++)
9066 tree temp;
9067 if (parallel_for)
9069 gcc_assert (outerc);
9070 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
9071 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
9072 OMP_CLAUSE__LOOPTEMP_);
9074 else
9076 temp = create_tmp_var (type, NULL);
9077 insert_decl_map (&ctx->outer->cb, temp, temp);
9079 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
9080 OMP_CLAUSE_DECL (*pc) = temp;
9081 pc = &OMP_CLAUSE_CHAIN (*pc);
9083 *pc = clauses;
9086 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9087 dlist = NULL;
9088 body = NULL;
9089 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
9090 fdp);
9091 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
9093 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9095 /* Lower the header expressions. At this point, we can assume that
9096 the header is of the form:
9098 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
9100 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
9101 using the .omp_data_s mapping, if needed. */
9102 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
9104 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
9105 if (!is_gimple_min_invariant (*rhs_p))
9106 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9108 rhs_p = gimple_omp_for_final_ptr (stmt, i);
9109 if (!is_gimple_min_invariant (*rhs_p))
9110 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9112 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
9113 if (!is_gimple_min_invariant (*rhs_p))
9114 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9117 /* Once lowered, extract the bounds and clauses. */
9118 extract_omp_for_data (stmt, &fd, NULL);
9120 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
9122 gimple_seq_add_stmt (&body, stmt);
9123 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
9125 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
9126 fd.loop.v));
9128 /* After the loop, add exit clauses. */
9129 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
9131 if (ctx->cancellable)
9132 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
9134 gimple_seq_add_seq (&body, dlist);
9136 body = maybe_catch_exception (body);
9138 /* Region exit marker goes at the end of the loop body. */
9139 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
9140 maybe_add_implicit_barrier_cancel (ctx, &body);
9141 pop_gimplify_context (new_stmt);
9143 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9144 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
9145 if (BLOCK_VARS (block))
9146 TREE_USED (block) = 1;
9148 gimple_bind_set_body (new_stmt, body);
9149 gimple_omp_set_body (stmt, NULL);
9150 gimple_omp_for_set_pre_body (stmt, NULL);
9153 /* Callback for walk_stmts. Check if the current statement only contains
9154 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
9156 static tree
9157 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9158 bool *handled_ops_p,
9159 struct walk_stmt_info *wi)
9161 int *info = (int *) wi->info;
9162 gimple stmt = gsi_stmt (*gsi_p);
9164 *handled_ops_p = true;
9165 switch (gimple_code (stmt))
9167 WALK_SUBSTMTS;
9169 case GIMPLE_OMP_FOR:
9170 case GIMPLE_OMP_SECTIONS:
9171 *info = *info == 0 ? 1 : -1;
9172 break;
9173 default:
9174 *info = -1;
9175 break;
9177 return NULL;
9180 struct omp_taskcopy_context
9182 /* This field must be at the beginning, as we do "inheritance": Some
9183 callback functions for tree-inline.c (e.g., omp_copy_decl)
9184 receive a copy_body_data pointer that is up-casted to an
9185 omp_taskcopy_context pointer. */
9186 copy_body_data cb;
9187 omp_context *ctx;
9190 static tree
9191 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9193 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9195 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9196 return create_tmp_var (TREE_TYPE (var), NULL);
9198 return var;
9201 static tree
9202 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9204 tree name, new_fields = NULL, type, f;
9206 type = lang_hooks.types.make_type (RECORD_TYPE);
9207 name = DECL_NAME (TYPE_NAME (orig_type));
9208 name = build_decl (gimple_location (tcctx->ctx->stmt),
9209 TYPE_DECL, name, type);
9210 TYPE_NAME (type) = name;
9212 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9214 tree new_f = copy_node (f);
9215 DECL_CONTEXT (new_f) = type;
9216 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9217 TREE_CHAIN (new_f) = new_fields;
9218 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9219 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9220 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9221 &tcctx->cb, NULL);
9222 new_fields = new_f;
9223 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
9225 TYPE_FIELDS (type) = nreverse (new_fields);
9226 layout_type (type);
9227 return type;
9230 /* Create task copyfn. */
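/* The generated copyfn has roughly this shape (a sketch; the record
   and field names are illustrative):

     void task_copyfn (struct .omp_data_t *arg,    // task's record
                       struct .omp_data_s *sarg)   // sender's record
     {
       arg->shared_p = sarg->shared_p;   // shared var pointers
       arg->fpvar = sarg->fpvar;         // firstprivate copy/ctor
       ...                               // VLA firstprivates last
     }  */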
9232 static void
9233 create_task_copyfn (gimple task_stmt, omp_context *ctx)
9235 struct function *child_cfun;
9236 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9237 tree record_type, srecord_type, bind, list;
9238 bool record_needs_remap = false, srecord_needs_remap = false;
9239 splay_tree_node n;
9240 struct omp_taskcopy_context tcctx;
9241 location_t loc = gimple_location (task_stmt);
9243 child_fn = gimple_omp_task_copy_fn (task_stmt);
9244 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9245 gcc_assert (child_cfun->cfg == NULL);
9246 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9248 /* Reset DECL_CONTEXT on function arguments. */
9249 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9250 DECL_CONTEXT (t) = child_fn;
9252 /* Populate the function. */
9253 push_gimplify_context ();
9254 push_cfun (child_cfun);
9256 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9257 TREE_SIDE_EFFECTS (bind) = 1;
9258 list = NULL;
9259 DECL_SAVED_TREE (child_fn) = bind;
9260 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9262 /* Remap src and dst argument types if needed. */
9263 record_type = ctx->record_type;
9264 srecord_type = ctx->srecord_type;
9265 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9266 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9268 record_needs_remap = true;
9269 break;
9271 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9272 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9274 srecord_needs_remap = true;
9275 break;
9278 if (record_needs_remap || srecord_needs_remap)
9280 memset (&tcctx, '\0', sizeof (tcctx));
9281 tcctx.cb.src_fn = ctx->cb.src_fn;
9282 tcctx.cb.dst_fn = child_fn;
9283 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
9284 gcc_checking_assert (tcctx.cb.src_node);
9285 tcctx.cb.dst_node = tcctx.cb.src_node;
9286 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9287 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9288 tcctx.cb.eh_lp_nr = 0;
9289 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9290 tcctx.cb.decl_map = pointer_map_create ();
9291 tcctx.ctx = ctx;
9293 if (record_needs_remap)
9294 record_type = task_copyfn_remap_type (&tcctx, record_type);
9295 if (srecord_needs_remap)
9296 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9298 else
9299 tcctx.cb.decl_map = NULL;
9301 arg = DECL_ARGUMENTS (child_fn);
9302 TREE_TYPE (arg) = build_pointer_type (record_type);
9303 sarg = DECL_CHAIN (arg);
9304 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9306 /* First pass: initialize temporaries used in record_type and srecord_type
9307 sizes and field offsets. */
9308 if (tcctx.cb.decl_map)
9309 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9310 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9312 tree *p;
9314 decl = OMP_CLAUSE_DECL (c);
9315 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
9316 if (p == NULL)
9317 continue;
9318 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9319 sf = (tree) n->value;
9320 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9321 src = build_simple_mem_ref_loc (loc, sarg);
9322 src = omp_build_component_ref (src, sf);
9323 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9324 append_to_statement_list (t, &list);
9327 /* Second pass: copy shared var pointers and copy-construct non-VLA
9328 firstprivate vars. */
9329 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9330 switch (OMP_CLAUSE_CODE (c))
9332 case OMP_CLAUSE_SHARED:
9333 decl = OMP_CLAUSE_DECL (c);
9334 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9335 if (n == NULL)
9336 break;
9337 f = (tree) n->value;
9338 if (tcctx.cb.decl_map)
9339 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9340 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9341 sf = (tree) n->value;
9342 if (tcctx.cb.decl_map)
9343 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9344 src = build_simple_mem_ref_loc (loc, sarg);
9345 src = omp_build_component_ref (src, sf);
9346 dst = build_simple_mem_ref_loc (loc, arg);
9347 dst = omp_build_component_ref (dst, f);
9348 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9349 append_to_statement_list (t, &list);
9350 break;
9351 case OMP_CLAUSE_FIRSTPRIVATE:
9352 decl = OMP_CLAUSE_DECL (c);
9353 if (is_variable_sized (decl))
9354 break;
9355 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9356 if (n == NULL)
9357 break;
9358 f = (tree) n->value;
9359 if (tcctx.cb.decl_map)
9360 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9361 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9362 if (n != NULL)
9364 sf = (tree) n->value;
9365 if (tcctx.cb.decl_map)
9366 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9367 src = build_simple_mem_ref_loc (loc, sarg);
9368 src = omp_build_component_ref (src, sf);
9369 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9370 src = build_simple_mem_ref_loc (loc, src);
9372 else
9373 src = decl;
9374 dst = build_simple_mem_ref_loc (loc, arg);
9375 dst = omp_build_component_ref (dst, f);
9376 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9377 append_to_statement_list (t, &list);
9378 break;
9379 case OMP_CLAUSE_PRIVATE:
9380 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9381 break;
9382 decl = OMP_CLAUSE_DECL (c);
9383 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9384 f = (tree) n->value;
9385 if (tcctx.cb.decl_map)
9386 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9387 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9388 if (n != NULL)
9390 sf = (tree) n->value;
9391 if (tcctx.cb.decl_map)
9392 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9393 src = build_simple_mem_ref_loc (loc, sarg);
9394 src = omp_build_component_ref (src, sf);
9395 if (use_pointer_for_field (decl, NULL))
9396 src = build_simple_mem_ref_loc (loc, src);
9398 else
9399 src = decl;
9400 dst = build_simple_mem_ref_loc (loc, arg);
9401 dst = omp_build_component_ref (dst, f);
9402 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9403 append_to_statement_list (t, &list);
9404 break;
9405 default:
9406 break;
9409 /* Last pass: handle VLA firstprivates. */
9410 if (tcctx.cb.decl_map)
9411 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9412 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9414 tree ind, ptr, df;
9416 decl = OMP_CLAUSE_DECL (c);
9417 if (!is_variable_sized (decl))
9418 continue;
9419 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9420 if (n == NULL)
9421 continue;
9422 f = (tree) n->value;
9423 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9424 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9425 ind = DECL_VALUE_EXPR (decl);
9426 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9427 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9428 n = splay_tree_lookup (ctx->sfield_map,
9429 (splay_tree_key) TREE_OPERAND (ind, 0));
9430 sf = (tree) n->value;
9431 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9432 src = build_simple_mem_ref_loc (loc, sarg);
9433 src = omp_build_component_ref (src, sf);
9434 src = build_simple_mem_ref_loc (loc, src);
9435 dst = build_simple_mem_ref_loc (loc, arg);
9436 dst = omp_build_component_ref (dst, f);
9437 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9438 append_to_statement_list (t, &list);
9439 n = splay_tree_lookup (ctx->field_map,
9440 (splay_tree_key) TREE_OPERAND (ind, 0));
9441 df = (tree) n->value;
9442 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
9443 ptr = build_simple_mem_ref_loc (loc, arg);
9444 ptr = omp_build_component_ref (ptr, df);
9445 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9446 build_fold_addr_expr_loc (loc, dst));
9447 append_to_statement_list (t, &list);
9450 t = build1 (RETURN_EXPR, void_type_node, NULL);
9451 append_to_statement_list (t, &list);
9453 if (tcctx.cb.decl_map)
9454 pointer_map_destroy (tcctx.cb.decl_map);
9455 pop_gimplify_context (NULL);
9456 BIND_EXPR_BODY (bind) = list;
9457 pop_cfun ();
9460 static void
9461 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9463 tree c, clauses;
9464 gimple g;
9465 size_t n_in = 0, n_out = 0, idx = 2, i;
9467 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9468 OMP_CLAUSE_DEPEND);
9469 gcc_assert (clauses);
9470 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9471 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9472 switch (OMP_CLAUSE_DEPEND_KIND (c))
9474 case OMP_CLAUSE_DEPEND_IN:
9475 n_in++;
9476 break;
9477 case OMP_CLAUSE_DEPEND_OUT:
9478 case OMP_CLAUSE_DEPEND_INOUT:
9479 n_out++;
9480 break;
9481 default:
9482 gcc_unreachable ();
9484 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9485 tree array = create_tmp_var (type, NULL);
9486 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9487 NULL_TREE);
9488 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9489 gimple_seq_add_stmt (iseq, g);
9490 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9491 NULL_TREE);
9492 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9493 gimple_seq_add_stmt (iseq, g);
9494 for (i = 0; i < 2; i++)
9496 if ((i ? n_in : n_out) == 0)
9497 continue;
9498 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9499 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9500 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9502 tree t = OMP_CLAUSE_DECL (c);
9503 t = fold_convert (ptr_type_node, t);
9504 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9505 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9506 NULL_TREE, NULL_TREE);
9507 g = gimple_build_assign (r, t);
9508 gimple_seq_add_stmt (iseq, g);
9511 tree *p = gimple_omp_task_clauses_ptr (stmt);
9512 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9513 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9514 OMP_CLAUSE_CHAIN (c) = *p;
9515 *p = c;
9516 tree clobber = build_constructor (type, NULL);
9517 TREE_THIS_VOLATILE (clobber) = 1;
9518 g = gimple_build_assign (array, clobber);
9519 gimple_seq_add_stmt (oseq, g);
9522 /* Lower the OpenMP parallel or task directive in the current statement
9523 in GSI_P. CTX holds context information for the directive. */
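/* Sketch (illustrative only): for

       #pragma omp parallel shared(x) firstprivate(y)
       body;

   the body is lowered in place, an artificial .omp_data_o record
   carrying X and Y is created and registered as the statement's
   data_arg, the child's receiver decl is initialized from its address
   at the start of the new body, and the lowered body plus the clause
   setup/teardown sequences become the new statement body, terminated
   by a GIMPLE_OMP_RETURN; the statement itself is then wrapped in a
   GIMPLE_BIND between the sender-side sequences.  */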
9525 static void
9526 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9528 tree clauses;
9529 tree child_fn, t;
9530 gimple stmt = gsi_stmt (*gsi_p);
9531 gimple par_bind, bind, dep_bind = NULL;
9532 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9533 location_t loc = gimple_location (stmt);
9535 clauses = gimple_omp_taskreg_clauses (stmt);
9536 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9537 par_body = gimple_bind_body (par_bind);
9538 child_fn = ctx->cb.dst_fn;
9539 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9540 && !gimple_omp_parallel_combined_p (stmt))
9542 struct walk_stmt_info wi;
9543 int ws_num = 0;
9545 memset (&wi, 0, sizeof (wi));
9546 wi.info = &ws_num;
9547 wi.val_only = true;
9548 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9549 if (ws_num == 1)
9550 gimple_omp_parallel_set_combined_p (stmt, true);
9552 gimple_seq dep_ilist = NULL;
9553 gimple_seq dep_olist = NULL;
9554 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9555 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9557 push_gimplify_context ();
9558 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9559 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9562 if (ctx->srecord_type)
9563 create_task_copyfn (stmt, ctx);
9565 push_gimplify_context ();
9567 par_olist = NULL;
9568 par_ilist = NULL;
9569 par_rlist = NULL;
9570 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9571 lower_omp (&par_body, ctx);
9572 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9573 lower_reduction_clauses (clauses, &par_rlist, ctx);
9575 /* Declare all the variables created by mapping and the variables
9576 declared in the scope of the parallel body. */
9577 record_vars_into (ctx->block_vars, child_fn);
9578 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9580 if (ctx->record_type)
9582 ctx->sender_decl
9583 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9584 : ctx->record_type, ".omp_data_o");
9585 DECL_NAMELESS (ctx->sender_decl) = 1;
9586 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9587 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9590 olist = NULL;
9591 ilist = NULL;
9592 lower_send_clauses (clauses, &ilist, &olist, ctx);
9593 lower_send_shared_vars (&ilist, &olist, ctx);
9595 if (ctx->record_type)
9597 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
9598 TREE_THIS_VOLATILE (clobber) = 1;
9599 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9600 clobber));
9603 /* Once all the expansions are done, sequence all the different
9604 fragments inside gimple_omp_body. */
9606 new_body = NULL;
9608 if (ctx->record_type)
9610 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9611 /* fixup_child_record_type might have changed receiver_decl's type. */
9612 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9613 gimple_seq_add_stmt (&new_body,
9614 gimple_build_assign (ctx->receiver_decl, t));
9617 gimple_seq_add_seq (&new_body, par_ilist);
9618 gimple_seq_add_seq (&new_body, par_body);
9619 gimple_seq_add_seq (&new_body, par_rlist);
9620 if (ctx->cancellable)
9621 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
9622 gimple_seq_add_seq (&new_body, par_olist);
9623 new_body = maybe_catch_exception (new_body);
9624 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9625 gimple_omp_set_body (stmt, new_body);
9627 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
9628 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
9629 gimple_bind_add_seq (bind, ilist);
9630 gimple_bind_add_stmt (bind, stmt);
9631 gimple_bind_add_seq (bind, olist);
9633 pop_gimplify_context (NULL);
9635 if (dep_bind)
9637 gimple_bind_add_seq (dep_bind, dep_ilist);
9638 gimple_bind_add_stmt (dep_bind, bind);
9639 gimple_bind_add_seq (dep_bind, dep_olist);
9640 pop_gimplify_context (dep_bind);
9644 /* Lower the OpenMP target directive in the current statement
9645 in GSI_P. CTX holds context information for the directive. */
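/* Rough sketch of the data layout built below (derived from the code,
   not a documented interface): for each map/to/from clause of e.g.

       #pragma omp target map(tofrom: a)

   one slot is filled in each of three artificial arrays --
   .omp_data_arr (the host addresses), .omp_data_sizes (the byte
   sizes) and .omp_data_kinds (one unsigned char per slot: the map
   kind in the low 3 bits, ceil_log2 of the alignment in the rest) --
   and the three decls are stored in a TREE_VEC as the target
   statement's data_arg.  */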
9647 static void
9648 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9650 tree clauses;
9651 tree child_fn, t, c;
9652 gimple stmt = gsi_stmt (*gsi_p);
9653 gimple tgt_bind = NULL, bind;
9654 gimple_seq tgt_body = NULL, olist, ilist, new_body;
9655 location_t loc = gimple_location (stmt);
9656 int kind = gimple_omp_target_kind (stmt);
9657 unsigned int map_cnt = 0;
9659 clauses = gimple_omp_target_clauses (stmt);
9660 if (kind == GF_OMP_TARGET_KIND_REGION)
9662 tgt_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9663 tgt_body = gimple_bind_body (tgt_bind);
9665 else if (kind == GF_OMP_TARGET_KIND_DATA)
9666 tgt_body = gimple_omp_body (stmt);
9667 child_fn = ctx->cb.dst_fn;
9669 push_gimplify_context ();
9671 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9672 switch (OMP_CLAUSE_CODE (c))
9674 tree var, x;
9676 default:
9677 break;
9678 case OMP_CLAUSE_MAP:
9679 case OMP_CLAUSE_TO:
9680 case OMP_CLAUSE_FROM:
9681 var = OMP_CLAUSE_DECL (c);
9682 if (!DECL_P (var))
9684 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
9685 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9686 map_cnt++;
9687 continue;
9690 if (DECL_SIZE (var)
9691 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
9693 tree var2 = DECL_VALUE_EXPR (var);
9694 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
9695 var2 = TREE_OPERAND (var2, 0);
9696 gcc_assert (DECL_P (var2));
9697 var = var2;
9700 if (!maybe_lookup_field (var, ctx))
9701 continue;
9703 if (kind == GF_OMP_TARGET_KIND_REGION)
9705 x = build_receiver_ref (var, true, ctx);
9706 tree new_var = lookup_decl (var, ctx);
9707 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9708 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9709 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9710 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
9711 x = build_simple_mem_ref (x);
9712 SET_DECL_VALUE_EXPR (new_var, x);
9713 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
9715 map_cnt++;
9718 if (kind == GF_OMP_TARGET_KIND_REGION)
9720 target_nesting_level++;
9721 lower_omp (&tgt_body, ctx);
9722 target_nesting_level--;
9724 else if (kind == GF_OMP_TARGET_KIND_DATA)
9725 lower_omp (&tgt_body, ctx);
9727 if (kind == GF_OMP_TARGET_KIND_REGION)
9729 /* Declare all the variables created by mapping and the variables
9730 declared in the scope of the target body. */
9731 record_vars_into (ctx->block_vars, child_fn);
9732 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
9735 olist = NULL;
9736 ilist = NULL;
9737 if (ctx->record_type)
9739 ctx->sender_decl
9740 = create_tmp_var (ctx->record_type, ".omp_data_arr");
9741 DECL_NAMELESS (ctx->sender_decl) = 1;
9742 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9743 t = make_tree_vec (3);
9744 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
9745 TREE_VEC_ELT (t, 1)
9746 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
9747 ".omp_data_sizes");
9748 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
9749 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
9750 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
9751 TREE_VEC_ELT (t, 2)
9752 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
9753 map_cnt),
9754 ".omp_data_kinds");
9755 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
9756 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
9757 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
9758 gimple_omp_target_set_data_arg (stmt, t);
9760 vec<constructor_elt, va_gc> *vsize;
9761 vec<constructor_elt, va_gc> *vkind;
9762 vec_alloc (vsize, map_cnt);
9763 vec_alloc (vkind, map_cnt);
9764 unsigned int map_idx = 0;
9766 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9767 switch (OMP_CLAUSE_CODE (c))
9769 tree ovar, nc;
9771 default:
9772 break;
9773 case OMP_CLAUSE_MAP:
9774 case OMP_CLAUSE_TO:
9775 case OMP_CLAUSE_FROM:
9776 nc = c;
9777 ovar = OMP_CLAUSE_DECL (c);
9778 if (!DECL_P (ovar))
9780 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9781 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9783 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
9784 == get_base_address (ovar));
9785 nc = OMP_CLAUSE_CHAIN (c);
9786 ovar = OMP_CLAUSE_DECL (nc);
9788 else
9790 tree x = build_sender_ref (ovar, ctx);
9791 tree v
9792 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
9793 gimplify_assign (x, v, &ilist);
9794 nc = NULL_TREE;
9797 else
9799 if (DECL_SIZE (ovar)
9800 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
9802 tree ovar2 = DECL_VALUE_EXPR (ovar);
9803 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
9804 ovar2 = TREE_OPERAND (ovar2, 0);
9805 gcc_assert (DECL_P (ovar2));
9806 ovar = ovar2;
9808 if (!maybe_lookup_field (ovar, ctx))
9809 continue;
9812 if (nc)
9814 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
9815 tree x = build_sender_ref (ovar, ctx);
9816 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9817 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9818 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9819 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
9821 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9822 tree avar
9823 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
9824 mark_addressable (avar);
9825 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
9826 avar = build_fold_addr_expr (avar);
9827 gimplify_assign (x, avar, &ilist);
9829 else if (is_gimple_reg (var))
9831 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9832 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
9833 mark_addressable (avar);
9834 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
9835 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
9836 gimplify_assign (avar, var, &ilist);
9837 avar = build_fold_addr_expr (avar);
9838 gimplify_assign (x, avar, &ilist);
9839 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
9840 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
9841 && !TYPE_READONLY (TREE_TYPE (var)))
9843 x = build_sender_ref (ovar, ctx);
9844 x = build_simple_mem_ref (x);
9845 gimplify_assign (var, x, &olist);
9848 else
9850 var = build_fold_addr_expr (var);
9851 gimplify_assign (x, var, &ilist);
9854 tree s = OMP_CLAUSE_SIZE (c);
9855 if (s == NULL_TREE)
9856 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
9857 s = fold_convert (size_type_node, s);
9858 tree purpose = size_int (map_idx++);
9859 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
9860 if (TREE_CODE (s) != INTEGER_CST)
9861 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
9863 unsigned char tkind = 0;
9864 switch (OMP_CLAUSE_CODE (c))
9866 case OMP_CLAUSE_MAP:
9867 tkind = OMP_CLAUSE_MAP_KIND (c);
9868 break;
9869 case OMP_CLAUSE_TO:
9870 tkind = OMP_CLAUSE_MAP_TO;
9871 break;
9872 case OMP_CLAUSE_FROM:
9873 tkind = OMP_CLAUSE_MAP_FROM;
9874 break;
9875 default:
9876 gcc_unreachable ();
9878 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
9879 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
9880 talign = DECL_ALIGN_UNIT (ovar);
9881 talign = ceil_log2 (talign);
9882 tkind |= talign << 3;
9883 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
9884 build_int_cst (unsigned_char_type_node,
9885 tkind));
9886 if (nc && nc != c)
9887 c = nc;
9890 gcc_assert (map_idx == map_cnt);
9892 DECL_INITIAL (TREE_VEC_ELT (t, 1))
9893 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
9894 DECL_INITIAL (TREE_VEC_ELT (t, 2))
9895 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
9896 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
9898 gimple_seq initlist = NULL;
9899 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
9900 TREE_VEC_ELT (t, 1)),
9901 &initlist, true, NULL_TREE);
9902 gimple_seq_add_seq (&ilist, initlist);
9904 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
9905 NULL);
9906 TREE_THIS_VOLATILE (clobber) = 1;
9907 gimple_seq_add_stmt (&olist,
9908 gimple_build_assign (TREE_VEC_ELT (t, 1),
9909 clobber));
9912 tree clobber = build_constructor (ctx->record_type, NULL);
9913 TREE_THIS_VOLATILE (clobber) = 1;
9914 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9915 clobber));
9918 /* Once all the expansions are done, sequence all the different
9919 fragments inside gimple_omp_body. */
9921 new_body = NULL;
9923 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
9925 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9926 /* fixup_child_record_type might have changed receiver_decl's type. */
9927 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9928 gimple_seq_add_stmt (&new_body,
9929 gimple_build_assign (ctx->receiver_decl, t));
9932 if (kind == GF_OMP_TARGET_KIND_REGION)
9934 gimple_seq_add_seq (&new_body, tgt_body);
9935 new_body = maybe_catch_exception (new_body);
9937 else if (kind == GF_OMP_TARGET_KIND_DATA)
9938 new_body = tgt_body;
9939 if (kind != GF_OMP_TARGET_KIND_UPDATE)
9941 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9942 gimple_omp_set_body (stmt, new_body);
9945 bind = gimple_build_bind (NULL, NULL,
9946 tgt_bind ? gimple_bind_block (tgt_bind)
9947 : NULL_TREE);
9948 gsi_replace (gsi_p, bind, true);
9949 gimple_bind_add_seq (bind, ilist);
9950 gimple_bind_add_stmt (bind, stmt);
9951 gimple_bind_add_seq (bind, olist);
9953 pop_gimplify_context (NULL);
9956 /* Expand code for an OpenMP teams directive. */
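/* Illustrative example: for

       #pragma omp teams num_teams(4) thread_limit(2)

   both clause expressions are gimplified and a call to the
   BUILT_IN_GOMP_TEAMS builtin, roughly GOMP_teams (4, 2), is emitted
   in front of the teams body; a missing clause contributes 0.  */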
9958 static void
9959 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9961 gimple teams_stmt = gsi_stmt (*gsi_p);
9962 push_gimplify_context ();
9964 tree block = make_node (BLOCK);
9965 gimple bind = gimple_build_bind (NULL, NULL, block);
9966 gsi_replace (gsi_p, bind, true);
9967 gimple_seq bind_body = NULL;
9968 gimple_seq dlist = NULL;
9969 gimple_seq olist = NULL;
9971 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9972 OMP_CLAUSE_NUM_TEAMS);
9973 if (num_teams == NULL_TREE)
9974 num_teams = build_int_cst (unsigned_type_node, 0);
9975 else
9977 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
9978 num_teams = fold_convert (unsigned_type_node, num_teams);
9979 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
9981 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9982 OMP_CLAUSE_THREAD_LIMIT);
9983 if (thread_limit == NULL_TREE)
9984 thread_limit = build_int_cst (unsigned_type_node, 0);
9985 else
9987 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
9988 thread_limit = fold_convert (unsigned_type_node, thread_limit);
9989 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
9990 fb_rvalue);
9993 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
9994 &bind_body, &dlist, ctx, NULL);
9995 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
9996 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
9997 gimple_seq_add_stmt (&bind_body, teams_stmt);
9999 location_t loc = gimple_location (teams_stmt);
10000 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
10001 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
10002 gimple_set_location (call, loc);
10003 gimple_seq_add_stmt (&bind_body, call);
10005 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
10006 gimple_omp_set_body (teams_stmt, NULL);
10007 gimple_seq_add_seq (&bind_body, olist);
10008 gimple_seq_add_seq (&bind_body, dlist);
10009 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
10010 gimple_bind_set_body (bind, bind_body);
10012 pop_gimplify_context (bind);
10014 gimple_bind_append_vars (bind, ctx->block_vars);
10015 BLOCK_VARS (block) = ctx->block_vars;
10016 if (BLOCK_VARS (block))
10017 TREE_USED (block) = 1;
10021 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
10022 regimplified.  If DATA is non-NULL, lower_omp_1 is being called
10023 outside of an OpenMP context, but with task_shared_vars set.  */
10025 static tree
10026 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
10027 void *data)
10029 tree t = *tp;
10031 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
10032 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
10033 return t;
10035 if (task_shared_vars
10036 && DECL_P (t)
10037 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
10038 return t;
10040 /* If a global variable has been privatized, TREE_CONSTANT on
10041 ADDR_EXPR might be wrong. */
10042 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
10043 recompute_tree_invariant_for_addr_expr (t);
10045 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
10046 return NULL_TREE;
10049 static void
10050 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10052 gimple stmt = gsi_stmt (*gsi_p);
10053 struct walk_stmt_info wi;
10055 if (gimple_has_location (stmt))
10056 input_location = gimple_location (stmt);
10058 if (task_shared_vars)
10059 memset (&wi, '\0', sizeof (wi));
10061 /* If we have issued syntax errors, avoid doing any heavy lifting.
10062 Just replace the OpenMP directives with a NOP to avoid
10063 confusing RTL expansion. */
10064 if (seen_error () && is_gimple_omp (stmt))
10066 gsi_replace (gsi_p, gimple_build_nop (), true);
10067 return;
10070 switch (gimple_code (stmt))
10072 case GIMPLE_COND:
10073 if ((ctx || task_shared_vars)
10074 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
10075 ctx ? NULL : &wi, NULL)
10076 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
10077 ctx ? NULL : &wi, NULL)))
10078 gimple_regimplify_operands (stmt, gsi_p);
10079 break;
10080 case GIMPLE_CATCH:
10081 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
10082 break;
10083 case GIMPLE_EH_FILTER:
10084 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
10085 break;
10086 case GIMPLE_TRY:
10087 lower_omp (gimple_try_eval_ptr (stmt), ctx);
10088 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
10089 break;
10090 case GIMPLE_TRANSACTION:
10091 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
10092 break;
10093 case GIMPLE_BIND:
10094 lower_omp (gimple_bind_body_ptr (stmt), ctx);
10095 break;
10096 case GIMPLE_OMP_PARALLEL:
10097 case GIMPLE_OMP_TASK:
10098 ctx = maybe_lookup_ctx (stmt);
10099 gcc_assert (ctx);
10100 if (ctx->cancellable)
10101 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10102 lower_omp_taskreg (gsi_p, ctx);
10103 break;
10104 case GIMPLE_OMP_FOR:
10105 ctx = maybe_lookup_ctx (stmt);
10106 gcc_assert (ctx);
10107 if (ctx->cancellable)
10108 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10109 lower_omp_for (gsi_p, ctx);
10110 break;
10111 case GIMPLE_OMP_SECTIONS:
10112 ctx = maybe_lookup_ctx (stmt);
10113 gcc_assert (ctx);
10114 if (ctx->cancellable)
10115 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10116 lower_omp_sections (gsi_p, ctx);
10117 break;
10118 case GIMPLE_OMP_SINGLE:
10119 ctx = maybe_lookup_ctx (stmt);
10120 gcc_assert (ctx);
10121 lower_omp_single (gsi_p, ctx);
10122 break;
10123 case GIMPLE_OMP_MASTER:
10124 ctx = maybe_lookup_ctx (stmt);
10125 gcc_assert (ctx);
10126 lower_omp_master (gsi_p, ctx);
10127 break;
10128 case GIMPLE_OMP_TASKGROUP:
10129 ctx = maybe_lookup_ctx (stmt);
10130 gcc_assert (ctx);
10131 lower_omp_taskgroup (gsi_p, ctx);
10132 break;
10133 case GIMPLE_OMP_ORDERED:
10134 ctx = maybe_lookup_ctx (stmt);
10135 gcc_assert (ctx);
10136 lower_omp_ordered (gsi_p, ctx);
10137 break;
10138 case GIMPLE_OMP_CRITICAL:
10139 ctx = maybe_lookup_ctx (stmt);
10140 gcc_assert (ctx);
10141 lower_omp_critical (gsi_p, ctx);
10142 break;
10143 case GIMPLE_OMP_ATOMIC_LOAD:
10144 if ((ctx || task_shared_vars)
10145 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
10146 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
10147 gimple_regimplify_operands (stmt, gsi_p);
10148 break;
10149 case GIMPLE_OMP_TARGET:
10150 ctx = maybe_lookup_ctx (stmt);
10151 gcc_assert (ctx);
10152 lower_omp_target (gsi_p, ctx);
10153 break;
10154 case GIMPLE_OMP_TEAMS:
10155 ctx = maybe_lookup_ctx (stmt);
10156 gcc_assert (ctx);
10157 lower_omp_teams (gsi_p, ctx);
10158 break;
10159 case GIMPLE_CALL:
10160 tree fndecl;
10161 fndecl = gimple_call_fndecl (stmt);
10162 if (fndecl
10163 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10164 switch (DECL_FUNCTION_CODE (fndecl))
10166 case BUILT_IN_GOMP_BARRIER:
10167 if (ctx == NULL)
10168 break;
10169 /* FALLTHRU */
10170 case BUILT_IN_GOMP_CANCEL:
10171 case BUILT_IN_GOMP_CANCELLATION_POINT:
10172 omp_context *cctx;
10173 cctx = ctx;
10174 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10175 cctx = cctx->outer;
10176 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
10177 if (!cctx->cancellable)
10179 if (DECL_FUNCTION_CODE (fndecl)
10180 == BUILT_IN_GOMP_CANCELLATION_POINT)
10182 stmt = gimple_build_nop ();
10183 gsi_replace (gsi_p, stmt, false);
10185 break;
10187 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10189 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10190 gimple_call_set_fndecl (stmt, fndecl);
10191 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10193 tree lhs;
10194 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)), NULL);
10195 gimple_call_set_lhs (stmt, lhs);
10196 tree fallthru_label;
10197 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10198 gimple g;
10199 g = gimple_build_label (fallthru_label);
10200 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10201 g = gimple_build_cond (NE_EXPR, lhs,
10202 fold_convert (TREE_TYPE (lhs),
10203 boolean_false_node),
10204 cctx->cancel_label, fallthru_label);
10205 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10206 break;
10207 default:
10208 break;
10210 /* FALLTHRU */
10211 default:
10212 if ((ctx || task_shared_vars)
10213 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10214 ctx ? NULL : &wi))
10216 /* Just remove clobbers.  This should happen only if we have
10217 "privatized" local addressable variables in SIMD regions;
10218 the clobber isn't needed in that case, and gimplifying the
10219 address of the ARRAY_REF into a pointer and creating a MEM_REF
10220 based clobber would create worse code than we get with the
10221 clobber dropped.  */
10222 if (gimple_clobber_p (stmt))
10224 gsi_replace (gsi_p, gimple_build_nop (), true);
10225 break;
10227 gimple_regimplify_operands (stmt, gsi_p);
10229 break;
10233 static void
10234 lower_omp (gimple_seq *body, omp_context *ctx)
10236 location_t saved_location = input_location;
10237 gimple_stmt_iterator gsi;
10238 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10239 lower_omp_1 (&gsi, ctx);
10240 /* During gimplification, we have not always invoked fold_stmt
10241 (gimplify.c:maybe_fold_stmt); call it now. */
10242 if (target_nesting_level)
10243 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10244 fold_stmt (&gsi);
10245 input_location = saved_location;
10248 /* Main entry point. */
10250 static unsigned int
10251 execute_lower_omp (void)
10253 gimple_seq body;
10255 /* This pass always runs, to provide PROP_gimple_lomp.
10256 But there is nothing to do unless -fopenmp is given. */
10257 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10258 return 0;
10260 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10261 delete_omp_context);
10263 body = gimple_body (current_function_decl);
10264 scan_omp (&body, NULL);
10265 gcc_assert (taskreg_nesting_level == 0);
10267 if (all_contexts->root)
10269 if (task_shared_vars)
10270 push_gimplify_context ();
10271 lower_omp (&body, NULL);
10272 if (task_shared_vars)
10273 pop_gimplify_context (NULL);
10276 if (all_contexts)
10278 splay_tree_delete (all_contexts);
10279 all_contexts = NULL;
10281 BITMAP_FREE (task_shared_vars);
10282 return 0;
10285 namespace {
10287 const pass_data pass_data_lower_omp =
10289 GIMPLE_PASS, /* type */
10290 "omplower", /* name */
10291 OPTGROUP_NONE, /* optinfo_flags */
10292 true, /* has_execute */
10293 TV_NONE, /* tv_id */
10294 PROP_gimple_any, /* properties_required */
10295 PROP_gimple_lomp, /* properties_provided */
10296 0, /* properties_destroyed */
10297 0, /* todo_flags_start */
10298 0, /* todo_flags_finish */
10301 class pass_lower_omp : public gimple_opt_pass
10303 public:
10304 pass_lower_omp (gcc::context *ctxt)
10305 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10308 /* opt_pass methods: */
10309 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10311 }; // class pass_lower_omp
10313 } // anon namespace
10315 gimple_opt_pass *
10316 make_pass_lower_omp (gcc::context *ctxt)
10318 return new pass_lower_omp (ctxt);
10321 /* The following is a utility to diagnose OpenMP structured block violations.
10322 It is not part of the "omplower" pass, as that's invoked too late. It
10323 should be invoked by the respective front ends after gimplification. */
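/* For example, both gotos below are diagnosed by this machinery:

       goto l;                      invalid entry
       #pragma omp parallel
       {
         l:;
         goto m;                    invalid exit/branch
       }
       m:;

   Pass 1 records the context of every label; pass 2 checks, for each
   branch, that the destination label's recorded context matches the
   branch's own context.  */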
10325 static splay_tree all_labels;
10327 /* Check for mismatched contexts and generate an error if needed. Return
10328 true if an error is detected. */
10330 static bool
10331 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10332 gimple branch_ctx, gimple label_ctx)
10334 if (label_ctx == branch_ctx)
10335 return false;
10339 Previously we kept track of the label's entire context in diagnose_sb_[12]
10340 so we could traverse it and issue a correct "exit" or "enter" error
10341 message upon a structured block violation.
10343 We built the context by building a list with tree_cons'ing, but there is
10344 no easy counterpart in gimple tuples. It seems like far too much work
10345 for issuing exit/enter error messages. If someone really misses the
10346 distinct error message... patches welcome.
10349 #if 0
10350 /* Try to avoid confusing the user by producing an error message
10351 with correct "exit" or "enter" verbiage. We prefer "exit"
10352 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10353 if (branch_ctx == NULL)
10354 exit_p = false;
10355 else
10357 while (label_ctx)
10359 if (TREE_VALUE (label_ctx) == branch_ctx)
10361 exit_p = false;
10362 break;
10364 label_ctx = TREE_CHAIN (label_ctx);
10368 if (exit_p)
10369 error ("invalid exit from OpenMP structured block");
10370 else
10371 error ("invalid entry to OpenMP structured block");
10372 #endif
10374 bool cilkplus_block = false;
10375 if (flag_cilkplus)
10377 if ((branch_ctx
10378 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10379 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10380 || (label_ctx
10381 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10382 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10383 cilkplus_block = true;
10386 /* If it's obvious we have an invalid entry, be specific about the error. */
10387 if (branch_ctx == NULL)
10389 if (cilkplus_block)
10390 error ("invalid entry to Cilk Plus structured block");
10391 else
10392 error ("invalid entry to OpenMP structured block");
10394 else
10396 /* Otherwise, be vague and lazy, but efficient. */
10397 if (cilkplus_block)
10398 error ("invalid branch to/from a Cilk Plus structured block");
10399 else
10400 error ("invalid branch to/from an OpenMP structured block");
10403 gsi_replace (gsi_p, gimple_build_nop (), false);
10404 return true;
10407 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10408 where each label is found. */
10410 static tree
10411 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10412 struct walk_stmt_info *wi)
10414 gimple context = (gimple) wi->info;
10415 gimple inner_context;
10416 gimple stmt = gsi_stmt (*gsi_p);
10418 *handled_ops_p = true;
10420 switch (gimple_code (stmt))
10422 WALK_SUBSTMTS;
10424 case GIMPLE_OMP_PARALLEL:
10425 case GIMPLE_OMP_TASK:
10426 case GIMPLE_OMP_SECTIONS:
10427 case GIMPLE_OMP_SINGLE:
10428 case GIMPLE_OMP_SECTION:
10429 case GIMPLE_OMP_MASTER:
10430 case GIMPLE_OMP_ORDERED:
10431 case GIMPLE_OMP_CRITICAL:
10432 case GIMPLE_OMP_TARGET:
10433 case GIMPLE_OMP_TEAMS:
10434 case GIMPLE_OMP_TASKGROUP:
10435 /* The minimal context here is just the current OMP construct. */
10436 inner_context = stmt;
10437 wi->info = inner_context;
10438 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10439 wi->info = context;
10440 break;
10442 case GIMPLE_OMP_FOR:
10443 inner_context = stmt;
10444 wi->info = inner_context;
10445 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10446 walk them. */
10447 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10448 diagnose_sb_1, NULL, wi);
10449 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10450 wi->info = context;
10451 break;
10453 case GIMPLE_LABEL:
10454 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
10455 (splay_tree_value) context);
10456 break;
10458 default:
10459 break;
10462 return NULL_TREE;
10465 /* Pass 2: Check each branch and see if its context differs from that of
10466 the destination label's context. */
10468 static tree
10469 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10470 struct walk_stmt_info *wi)
10472 gimple context = (gimple) wi->info;
10473 splay_tree_node n;
10474 gimple stmt = gsi_stmt (*gsi_p);
10476 *handled_ops_p = true;
10478 switch (gimple_code (stmt))
10480 WALK_SUBSTMTS;
10482 case GIMPLE_OMP_PARALLEL:
10483 case GIMPLE_OMP_TASK:
10484 case GIMPLE_OMP_SECTIONS:
10485 case GIMPLE_OMP_SINGLE:
10486 case GIMPLE_OMP_SECTION:
10487 case GIMPLE_OMP_MASTER:
10488 case GIMPLE_OMP_ORDERED:
10489 case GIMPLE_OMP_CRITICAL:
10490 case GIMPLE_OMP_TARGET:
10491 case GIMPLE_OMP_TEAMS:
10492 case GIMPLE_OMP_TASKGROUP:
10493 wi->info = stmt;
10494 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10495 wi->info = context;
10496 break;
10498 case GIMPLE_OMP_FOR:
10499 wi->info = stmt;
10500 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10501 walk them. */
10502 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10503 diagnose_sb_2, NULL, wi);
10504 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10505 wi->info = context;
10506 break;
10508 case GIMPLE_COND:
10510 tree lab = gimple_cond_true_label (stmt);
10511 if (lab)
10513 n = splay_tree_lookup (all_labels,
10514 (splay_tree_key) lab);
10515 diagnose_sb_0 (gsi_p, context,
10516 n ? (gimple) n->value : NULL);
10518 lab = gimple_cond_false_label (stmt);
10519 if (lab)
10521 n = splay_tree_lookup (all_labels,
10522 (splay_tree_key) lab);
10523 diagnose_sb_0 (gsi_p, context,
10524 n ? (gimple) n->value : NULL);
10527 break;
10529 case GIMPLE_GOTO:
10531 tree lab = gimple_goto_dest (stmt);
10532 if (TREE_CODE (lab) != LABEL_DECL)
10533 break;
10535 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10536 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10538 break;
10540 case GIMPLE_SWITCH:
10542 unsigned int i;
10543 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
10545 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
10546 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10547 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10548 break;
10551 break;
10553 case GIMPLE_RETURN:
10554 diagnose_sb_0 (gsi_p, context, NULL);
10555 break;
10557 default:
10558 break;
10561 return NULL_TREE;
10564 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10565 codes. */
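/* For instance, when the GIMPLE_OMP_CONTINUE of an OMP for region is
   seen below, the successor edge of the region entry is marked
   abnormal, an abnormal loopback edge is made from the continue block
   back to the block following the entry, and an abnormal edge from
   the entry to the block after the continue models the case where the
   loop body is not executed at all.  */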
10566 bool
10567 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
10568 int *region_idx)
10570 gimple last = last_stmt (bb);
10571 enum gimple_code code = gimple_code (last);
10572 struct omp_region *cur_region = *region;
10573 bool fallthru = false;
10575 switch (code)
10577 case GIMPLE_OMP_PARALLEL:
10578 case GIMPLE_OMP_TASK:
10579 case GIMPLE_OMP_FOR:
10580 case GIMPLE_OMP_SINGLE:
10581 case GIMPLE_OMP_TEAMS:
10582 case GIMPLE_OMP_MASTER:
10583 case GIMPLE_OMP_TASKGROUP:
10584 case GIMPLE_OMP_ORDERED:
10585 case GIMPLE_OMP_CRITICAL:
10586 case GIMPLE_OMP_SECTION:
10587 cur_region = new_omp_region (bb, code, cur_region);
10588 fallthru = true;
10589 break;
10591 case GIMPLE_OMP_TARGET:
10592 cur_region = new_omp_region (bb, code, cur_region);
10593 fallthru = true;
10594 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
10595 cur_region = cur_region->outer;
10596 break;
10598 case GIMPLE_OMP_SECTIONS:
10599 cur_region = new_omp_region (bb, code, cur_region);
10600 fallthru = true;
10601 break;
10603 case GIMPLE_OMP_SECTIONS_SWITCH:
10604 fallthru = false;
10605 break;
10607 case GIMPLE_OMP_ATOMIC_LOAD:
10608 case GIMPLE_OMP_ATOMIC_STORE:
10609 fallthru = true;
10610 break;
10612 case GIMPLE_OMP_RETURN:
10613 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
10614 somewhere other than the next block. This will be
10615 created later. */
10616 cur_region->exit = bb;
10617 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
10618 cur_region = cur_region->outer;
10619 break;
10621 case GIMPLE_OMP_CONTINUE:
10622 cur_region->cont = bb;
10623 switch (cur_region->type)
10625 case GIMPLE_OMP_FOR:
10626 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
10627 successor edges as abnormal to prevent splitting
10628 them.  */
10629 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
10630 /* Make the loopback edge. */
10631 make_edge (bb, single_succ (cur_region->entry),
10632 EDGE_ABNORMAL);
10634 /* Create an edge from GIMPLE_OMP_FOR to exit, which
10635 corresponds to the case that the body of the loop
10636 is not executed at all. */
10637 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
10638 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
10639 fallthru = false;
10640 break;
10642 case GIMPLE_OMP_SECTIONS:
10643 /* Wire up the edges into and out of the nested sections. */
10645 basic_block switch_bb = single_succ (cur_region->entry);
10647 struct omp_region *i;
10648 for (i = cur_region->inner; i ; i = i->next)
10650 gcc_assert (i->type == GIMPLE_OMP_SECTION);
10651 make_edge (switch_bb, i->entry, 0);
10652 make_edge (i->exit, bb, EDGE_FALLTHRU);
10655 /* Make the loopback edge to the block with
10656 GIMPLE_OMP_SECTIONS_SWITCH. */
10657 make_edge (bb, switch_bb, 0);
10659 /* Make the edge from the switch to exit. */
10660 make_edge (switch_bb, bb->next_bb, 0);
10661 fallthru = false;
10663 break;
10665 default:
10666 gcc_unreachable ();
10668 break;
10670 default:
10671 gcc_unreachable ();
10674 if (*region != cur_region)
10676 *region = cur_region;
10677 if (cur_region)
10678 *region_idx = cur_region->entry->index;
10679 else
10680 *region_idx = 0;
10683 return fallthru;
10686 static unsigned int
10687 diagnose_omp_structured_block_errors (void)
10689 struct walk_stmt_info wi;
10690 gimple_seq body = gimple_body (current_function_decl);
10692 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
10694 memset (&wi, 0, sizeof (wi));
10695 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
10697 memset (&wi, 0, sizeof (wi));
10698 wi.want_locations = true;
10699 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
10701 gimple_set_body (current_function_decl, body);
10703 splay_tree_delete (all_labels);
10704 all_labels = NULL;
10706 return 0;
10709 namespace {
10711 const pass_data pass_data_diagnose_omp_blocks =
10713 GIMPLE_PASS, /* type */
10714 "*diagnose_omp_blocks", /* name */
10715 OPTGROUP_NONE, /* optinfo_flags */
10716 true, /* has_execute */
10717 TV_NONE, /* tv_id */
10718 PROP_gimple_any, /* properties_required */
10719 0, /* properties_provided */
10720 0, /* properties_destroyed */
10721 0, /* todo_flags_start */
10722 0, /* todo_flags_finish */
10725 class pass_diagnose_omp_blocks : public gimple_opt_pass
10727 public:
10728 pass_diagnose_omp_blocks (gcc::context *ctxt)
10729 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
10732 /* opt_pass methods: */
10733 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
10734 virtual unsigned int execute (function *)
10736 return diagnose_omp_structured_block_errors ();
10739 }; // class pass_diagnose_omp_blocks
10741 } // anon namespace
10743 gimple_opt_pass *
10744 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
10746 return new pass_diagnose_omp_blocks (ctxt);
10749 /* SIMD clone supporting code. */
10751 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
10752 of arguments to reserve space for. */
10754 static struct cgraph_simd_clone *
10755 simd_clone_struct_alloc (int nargs)
10757 struct cgraph_simd_clone *clone_info;
10758 size_t len = (sizeof (struct cgraph_simd_clone)
10759 + nargs * sizeof (struct cgraph_simd_clone_arg));
10760 clone_info = (struct cgraph_simd_clone *)
10761 ggc_internal_cleared_alloc (len);
10762 return clone_info;
10765 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
10767 static inline void
10768 simd_clone_struct_copy (struct cgraph_simd_clone *to,
10769 struct cgraph_simd_clone *from)
10771 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
10772 + ((from->nargs - from->inbranch)
10773 * sizeof (struct cgraph_simd_clone_arg))));
10776 /* Return a vector of the parameter types of function FNDECL.  This
10777 uses TYPE_ARG_TYPES if available, otherwise falls back to the
10778 types of the DECL_ARGUMENTS.  */
10780 vec<tree>
10781 simd_clone_vector_of_formal_parm_types (tree fndecl)
10783 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
10784 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
10785 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
10786 unsigned int i;
10787 tree arg;
10788 FOR_EACH_VEC_ELT (args, i, arg)
10789 args[i] = TREE_TYPE (args[i]);
10790 return args;
10793 /* Given a simd function in NODE, extract the simd specific
10794 information from the OMP clauses passed in CLAUSES, and return
10795 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
10796 is set to TRUE if the `inbranch' or `notinbranch' clause is specified,
10797 otherwise set to FALSE. */
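/* Illustrative example of the extraction below:

       #pragma omp declare simd simdlen(8) uniform(a) linear(b:1) notinbranch
       int foo (int a, int b, int c);

   yields clone_info->simdlen == 8, args[0].arg_type == UNIFORM,
   args[1].arg_type == LINEAR_CONSTANT_STEP with linear_step 1,
   args[2] left as an ordinary vector argument, inbranch == 0 and
   *INBRANCH_SPECIFIED set to true.  */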
10799 static struct cgraph_simd_clone *
10800 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
10801 bool *inbranch_specified)
10803 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
10804 tree t;
10805 int n;
10806 *inbranch_specified = false;
10808 n = args.length ();
10809 if (n > 0 && args.last () == void_type_node)
10810 n--;
10812 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
10813 be cloned have a distinctive artificial "cilk simd function"
10814 attribute in addition to "omp declare simd".  */
10815 bool cilk_clone
10816 = (flag_cilkplus
10817 && lookup_attribute ("cilk simd function",
10818 DECL_ATTRIBUTES (node->decl)));
10820 /* Allocate one more than needed just in case this is an in-branch
10821 clone which will require a mask argument. */
10822 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
10823 clone_info->nargs = n;
10824 clone_info->cilk_elemental = cilk_clone;
10826 if (!clauses)
10828 args.release ();
10829 return clone_info;
10831 clauses = TREE_VALUE (clauses);
10832 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
10833 return clone_info;
10835 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
10837 switch (OMP_CLAUSE_CODE (t))
10839 case OMP_CLAUSE_INBRANCH:
10840 clone_info->inbranch = 1;
10841 *inbranch_specified = true;
10842 break;
10843 case OMP_CLAUSE_NOTINBRANCH:
10844 clone_info->inbranch = 0;
10845 *inbranch_specified = true;
10846 break;
10847 case OMP_CLAUSE_SIMDLEN:
10848 clone_info->simdlen
10849 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
10850 break;
10851 case OMP_CLAUSE_LINEAR:
10853 tree decl = OMP_CLAUSE_DECL (t);
10854 tree step = OMP_CLAUSE_LINEAR_STEP (t);
10855 int argno = TREE_INT_CST_LOW (decl);
10856 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
10858 clone_info->args[argno].arg_type
10859 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
10860 clone_info->args[argno].linear_step = tree_to_shwi (step);
10861 gcc_assert (clone_info->args[argno].linear_step >= 0
10862 && clone_info->args[argno].linear_step < n);
10864 else
10866 if (POINTER_TYPE_P (args[argno]))
10867 step = fold_convert (ssizetype, step);
10868 if (!tree_fits_shwi_p (step))
10870 warning_at (OMP_CLAUSE_LOCATION (t), 0,
10871 "ignoring large linear step");
10872 args.release ();
10873 return NULL;
10875 else if (integer_zerop (step))
10877 warning_at (OMP_CLAUSE_LOCATION (t), 0,
10878 "ignoring zero linear step");
10879 args.release ();
10880 return NULL;
10882 else
10884 clone_info->args[argno].arg_type
10885 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
10886 clone_info->args[argno].linear_step = tree_to_shwi (step);
10889 break;
10891 case OMP_CLAUSE_UNIFORM:
10893 tree decl = OMP_CLAUSE_DECL (t);
10894 int argno = tree_to_uhwi (decl);
10895 clone_info->args[argno].arg_type
10896 = SIMD_CLONE_ARG_TYPE_UNIFORM;
10897 break;
10899 case OMP_CLAUSE_ALIGNED:
10901 tree decl = OMP_CLAUSE_DECL (t);
10902 int argno = tree_to_uhwi (decl);
10903 clone_info->args[argno].alignment
10904 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
10905 break;
10907 default:
10908 break;
10911 args.release ();
10912 return clone_info;
10915 /* Given a SIMD clone in NODE, calculate the characteristic data
10916 type and return the corresponding type.  The characteristic data
10917 type is computed as described in the Intel Vector ABI. */
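/* Reading of the rules implemented below (illustrative): for
   "float foo (float x)" declared simd, the characteristic type is
   float by rule a); for a void function whose first non-uniform,
   non-linear parameter is a double, it is double by rule b);
   failing both, int.  */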
10919 static tree
10920 simd_clone_compute_base_data_type (struct cgraph_node *node,
10921 struct cgraph_simd_clone *clone_info)
10923 tree type = integer_type_node;
10924 tree fndecl = node->decl;
10926 /* a) For a non-void function, the characteristic data type is the
10927 return type. */
10928 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
10929 type = TREE_TYPE (TREE_TYPE (fndecl));
10931 /* b) If the function has any non-uniform, non-linear parameters,
10932 then the characteristic data type is the type of the first
10933 such parameter. */
10934 else
10936 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
10937 for (unsigned int i = 0; i < clone_info->nargs; ++i)
10938 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
10940 type = map[i];
10941 break;
10943 map.release ();
10946 /* c) If the characteristic data type determined by a) or b) above
10947 is a struct, union, or class type which is passed by value (except
10948 for the type that maps to the built-in complex data type), the
10949 characteristic data type is int. */
10950 if (RECORD_OR_UNION_TYPE_P (type)
10951 && !aggregate_value_p (type, NULL)
10952 && TREE_CODE (type) != COMPLEX_TYPE)
10953 return integer_type_node;
10955 /* d) If none of the above three classes is applicable, the
10956 characteristic data type is int. */
10958 return type;
10960 /* e) For Intel Xeon Phi native and offload compilation, if the
10961 resulting characteristic data type is 8-bit or 16-bit integer
10962 data type, the characteristic data type is int. */
10963 /* Well, we don't handle Xeon Phi yet. */
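/* Mangle the SIMD clone name following the vector ABI scheme read off
   the code below: "_ZGV", the target's vecsize_mangle letter, 'M' or
   'N' for masked/unmasked, the simdlen, then one letter per argument
   ('u' uniform, 'l' plus an optional step for constant-step linear,
   's' plus the step argument's number for variable-step linear, 'v'
   vector, each optionally followed by 'a' and an alignment), then '_'
   and the original assembler name.  E.g. an unmasked simdlen-4 clone
   of "foo" with two vector arguments would be named something like
   _ZGVxN4vv_foo, with 'x' standing for the ISA letter.  Returns
   NULL_TREE if a clone with the same mangled name already exists.  */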
10966 static tree
10967 simd_clone_mangle (struct cgraph_node *node,
10968 struct cgraph_simd_clone *clone_info)
10970 char vecsize_mangle = clone_info->vecsize_mangle;
10971 char mask = clone_info->inbranch ? 'M' : 'N';
10972 unsigned int simdlen = clone_info->simdlen;
10973 unsigned int n;
10974 pretty_printer pp;
10976 gcc_assert (vecsize_mangle && simdlen);
10978 pp_string (&pp, "_ZGV");
10979 pp_character (&pp, vecsize_mangle);
10980 pp_character (&pp, mask);
10981 pp_decimal_int (&pp, simdlen);
10983 for (n = 0; n < clone_info->nargs; ++n)
10985 struct cgraph_simd_clone_arg arg = clone_info->args[n];
10987 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
10988 pp_character (&pp, 'u');
10989 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
10991 gcc_assert (arg.linear_step != 0);
10992 pp_character (&pp, 'l');
10993 if (arg.linear_step > 1)
10994 pp_unsigned_wide_integer (&pp, arg.linear_step);
10995 else if (arg.linear_step < 0)
10997 pp_character (&pp, 'n');
10998 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
10999 arg.linear_step));
11002 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
11004 pp_character (&pp, 's');
11005 pp_unsigned_wide_integer (&pp, arg.linear_step);
11007 else
11008 pp_character (&pp, 'v');
11009 if (arg.alignment)
11011 pp_character (&pp, 'a');
11012 pp_decimal_int (&pp, arg.alignment);
11016 pp_underscore (&pp);
11017 pp_string (&pp,
11018 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
11019 const char *str = pp_formatted_text (&pp);
11021 /* If there already is a SIMD clone with the same mangled name, don't
11022 add another one. This can happen e.g. for
11023 #pragma omp declare simd
11024 #pragma omp declare simd simdlen(8)
11025 int foo (int, int);
11026 if the simdlen is assumed to be 8 for the first one, etc. */
11027 for (struct cgraph_node *clone = node->simd_clones; clone;
11028 clone = clone->simdclone->next_clone)
11029 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
11030 str) == 0)
11031 return NULL_TREE;
11033 return get_identifier (str);
11036 /* Create a simd clone of OLD_NODE and return it. */
11038 static struct cgraph_node *
11039 simd_clone_create (struct cgraph_node *old_node)
11041 struct cgraph_node *new_node;
11042 if (old_node->definition)
11044 if (!cgraph_function_with_gimple_body_p (old_node))
11045 return NULL;
11046 cgraph_get_body (old_node);
11047 new_node = cgraph_function_versioning (old_node, vNULL, NULL, NULL,
11048 false, NULL, NULL, "simdclone");
11050 else
11052 tree old_decl = old_node->decl;
11053 tree new_decl = copy_node (old_node->decl);
11054 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
11055 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
11056 SET_DECL_RTL (new_decl, NULL);
11057 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
11058 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
11059 new_node
11060 = cgraph_copy_node_for_versioning (old_node, new_decl, vNULL, NULL);
11061 cgraph_call_function_insertion_hooks (new_node);
11063 if (new_node == NULL)
11064 return new_node;
11066 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
11068 /* The function cgraph_function_versioning () will force the new
11069 symbol local.  Undo this, and inherit external visibility from
11070 the old node. */
11071 new_node->local.local = old_node->local.local;
11072 new_node->externally_visible = old_node->externally_visible;
11074 return new_node;
11077 /* Adjust the return type of the given function to its appropriate
11078 vector counterpart. Returns a simd array to be used throughout the
11079 function as a return value. */
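/* Illustrative numbers: with simdlen 8, an "int" return value and a
   256-bit integer vector size, veclen is 256/32 == 8 == simdlen and
   the return type simply becomes a vector(8) int; with a 128-bit
   vector size, veclen is 4 and the return type becomes an array of
   two vector(4) int instead.  */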
11081 static tree
11082 simd_clone_adjust_return_type (struct cgraph_node *node)
11084 tree fndecl = node->decl;
11085 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
11086 unsigned int veclen;
11087 tree t;
11089 /* Adjust the function return type. */
11090 if (orig_rettype == void_type_node)
11091 return NULL_TREE;
11092 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
11093 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
11094 || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
11095 veclen = node->simdclone->vecsize_int;
11096 else
11097 veclen = node->simdclone->vecsize_float;
11098 veclen /= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))));
11099 if (veclen > node->simdclone->simdlen)
11100 veclen = node->simdclone->simdlen;
11101 if (veclen == node->simdclone->simdlen)
11102 TREE_TYPE (TREE_TYPE (fndecl))
11103 = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)),
11104 node->simdclone->simdlen);
11105 else
11107 t = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)), veclen);
11108 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
11109 TREE_TYPE (TREE_TYPE (fndecl)) = t;
11111 if (!node->definition)
11112 return NULL_TREE;
11114 t = DECL_RESULT (fndecl);
11115 /* Adjust the DECL_RESULT. */
11116 gcc_assert (TREE_TYPE (t) != void_type_node);
11117 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
11118 relayout_decl (t);
11120 tree atype = build_array_type_nelts (orig_rettype,
11121 node->simdclone->simdlen);
11122 if (veclen != node->simdclone->simdlen)
11123 return build1 (VIEW_CONVERT_EXPR, atype, t);
11125 /* Set up a SIMD array to use as the return value. */
11126 tree retval = create_tmp_var_raw (atype, "retval");
11127 gimple_add_tmp_var (retval);
11128 return retval;
11131 /* Each vector argument has a corresponding array to be used locally
11132 as part of the eventual loop. Create such temporary array and
11133 return it.
11135 PREFIX is the prefix to be used for the temporary.
11137 TYPE is the inner element type.
11139 SIMDLEN is the number of elements. */
11141 static tree
11142 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
11144 tree atype = build_array_type_nelts (type, simdlen);
11145 tree avar = create_tmp_var_raw (atype, prefix);
11146 gimple_add_tmp_var (avar);
11147 return avar;
11150 /* Modify the function argument types to their corresponding vector
11151 counterparts if appropriate. Also, create one array for each simd
11152 argument to be used locally when using the function arguments as
11153 part of the loop.
11155 NODE is the function whose arguments are to be adjusted.
11157 Returns an adjustment vector describing how the argument types
11158 will be adjusted.  */
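/* Rough example: a float vector argument with simdlen 8 and a 128-bit
   float vector size gives veclen 4, so the parameter is rewritten
   into two vector(4) float parameters (the second pushed as an
   IPA_PARM_OP_NEW adjustment), and for a definition an 8-element
   float simd array is created to stand in for the original parameter
   in the clone body.  */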
static ipa_parm_adjustment_vec
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
  vec<tree> args;
  ipa_parm_adjustment_vec adjustments;

  if (node->definition)
    args = ipa_get_vector_of_formal_parms (node->decl);
  else
    args = simd_clone_vector_of_formal_parm_types (node->decl);
  adjustments.create (args.length ());
  unsigned i, j, veclen;
  struct ipa_parm_adjustment adj;
  for (i = 0; i < node->simdclone->nargs; ++i)
    {
      memset (&adj, 0, sizeof (adj));
      tree parm = args[i];
      tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
      adj.base_index = i;
      adj.base = parm;

      node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
      node->simdclone->args[i].orig_type = parm_type;

      if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
	{
	  /* No adjustment necessary for scalar arguments.  */
	  adj.op = IPA_PARM_OP_COPY;
	}
      else
	{
	  if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
	    veclen = node->simdclone->vecsize_int;
	  else
	    veclen = node->simdclone->vecsize_float;
	  veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
	  if (veclen > node->simdclone->simdlen)
	    veclen = node->simdclone->simdlen;
	  adj.arg_prefix = "simd";
	  adj.type = build_vector_type (parm_type, veclen);
	  node->simdclone->args[i].vector_type = adj.type;
	  for (j = veclen; j < node->simdclone->simdlen; j += veclen)
	    {
	      adjustments.safe_push (adj);
	      if (j == veclen)
		{
		  memset (&adj, 0, sizeof (adj));
		  adj.op = IPA_PARM_OP_NEW;
		  adj.arg_prefix = "simd";
		  adj.base_index = i;
		  adj.type = node->simdclone->args[i].vector_type;
		}
	    }

	  if (node->definition)
	    node->simdclone->args[i].simd_array
	      = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
				       parm_type, node->simdclone->simdlen);
	}
      adjustments.safe_push (adj);
    }
  if (node->simdclone->inbranch)
    {
      tree base_type
	= simd_clone_compute_base_data_type (node->simdclone->origin,
					     node->simdclone);

      memset (&adj, 0, sizeof (adj));
      adj.op = IPA_PARM_OP_NEW;
      adj.arg_prefix = "mask";

      adj.base_index = i;
      if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
	veclen = node->simdclone->vecsize_int;
      else
	veclen = node->simdclone->vecsize_float;
      veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
      if (veclen > node->simdclone->simdlen)
	veclen = node->simdclone->simdlen;
      adj.type = build_vector_type (base_type, veclen);
      adjustments.safe_push (adj);

      for (j = veclen; j < node->simdclone->simdlen; j += veclen)
	adjustments.safe_push (adj);

      /* We have previously allocated one extra entry for the mask.  Use
	 it and fill it.  */
      struct cgraph_simd_clone *sc = node->simdclone;
      sc->nargs++;
      if (node->definition)
	{
	  sc->args[i].orig_arg
	    = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
	  sc->args[i].simd_array
	    = create_tmp_simd_array ("mask", base_type, sc->simdlen);
	}
      sc->args[i].orig_type = base_type;
      sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
    }
  if (node->definition)
    ipa_modify_formal_parameters (node->decl, adjustments);
  else
    {
      tree new_arg_types = NULL_TREE, new_reversed;
      bool last_parm_void = false;
      if (args.length () > 0 && args.last () == void_type_node)
	last_parm_void = true;

      gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
      j = adjustments.length ();
      for (i = 0; i < j; i++)
	{
	  struct ipa_parm_adjustment *adj = &adjustments[i];
	  tree ptype;
	  if (adj->op == IPA_PARM_OP_COPY)
	    ptype = args[adj->base_index];
	  else
	    ptype = adj->type;
	  new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
	}
      new_reversed = nreverse (new_arg_types);
      if (last_parm_void)
	{
	  if (new_reversed)
	    TREE_CHAIN (new_arg_types) = void_list_node;
	  else
	    new_reversed = void_list_node;
	}

      tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
      TYPE_ARG_TYPES (new_type) = new_reversed;
      TREE_TYPE (node->decl) = new_type;

      adjustments.release ();
    }
  args.release ();
  return adjustments;
}
/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.  */
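/* Sketch of the generated sequence for a V8SI argument ARG whose simd
   array D.1234 holds 16 elements (names and offsets illustrative):
     MEM[(int *)&D.1234] = ARG;
     MEM[(int *)&D.1234 + 32B] = ARG.1;  */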
static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_parm_adjustment_vec adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      if (adjustments[j].op == IPA_PARM_OP_COPY)
	continue;

      node->simdclone->args[i].vector_arg = arg;

      tree array = node->simdclone->args[i].simd_array;
      if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
	{
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      elemsize
		= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}
/* Callback info for ipa_simd_modify_stmt_ops below.  */

struct modify_stmt_info {
  ipa_parm_adjustment_vec adjustments;
  gimple stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};

/* Callback for walk_gimple_op.

   Adjust operands from a given statement as specified in the
   adjustments vector in the callback data.  */
static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  struct ipa_parm_adjustment *cand = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
  else
    {
      if (TYPE_P (*tp))
	*walk_subtrees = 0;
    }

  tree repl = NULL_TREE;
  if (cand)
    repl = unshare_expr (cand->new_decl);
  else
    {
      if (tp != orig_tp)
	{
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }

  if (tp != orig_tp)
    {
      repl = build_fold_addr_expr (repl);
      gimple stmt
	= gimple_build_assign (make_ssa_name (TREE_TYPE (repl), NULL), repl);
      repl = gimple_assign_lhs (stmt);
      gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;

  info->modified = true;
  return NULL_TREE;
}
/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.

   This function will replace all function argument uses with their
   corresponding simd array elements, and adjust the return values
   accordingly.  */
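/* Conceptually, a use of parameter X in the body becomes a read of
   X's simd array at the current lane, e.g. "... = x.simd[iter]"
   (illustrative notation; the actual replacement is the ARRAY_REF
   built below).  */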
static void
ipa_simd_modify_function_body (struct cgraph_node *node,
			       ipa_parm_adjustment_vec adjustments,
			       tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j, l;

  /* Re-use the adjustments array, but this time use it to replace
     every function argument use with an offset into the corresponding
     simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg)
	continue;

      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      adjustments[j].new_decl
	= build4 (ARRAY_REF,
		  basetype,
		  node->simdclone->args[i].simd_array,
		  iter,
		  NULL_TREE, NULL_TREE);
      if (adjustments[j].op == IPA_PARM_OP_NONE
	  && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
	j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
    }

  l = adjustments.length ();
  for (i = 1; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (name
	  && SSA_NAME_VAR (name)
	  && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
	{
	  for (j = 0; j < l; j++)
	    if (SSA_NAME_VAR (name) == adjustments[j].base
		&& adjustments[j].new_decl)
	      {
		tree base_var;
		if (adjustments[j].new_ssa_base == NULL_TREE)
		  {
		    base_var
		      = copy_var_decl (adjustments[j].base,
				       DECL_NAME (adjustments[j].base),
				       TREE_TYPE (adjustments[j].base));
		    adjustments[j].new_ssa_base = base_var;
		  }
		else
		  base_var = adjustments[j].new_ssa_base;
		if (SSA_NAME_IS_DEFAULT_DEF (name))
		  {
		    bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
		    gimple_stmt_iterator gsi = gsi_after_labels (bb);
		    tree new_decl = unshare_expr (adjustments[j].new_decl);
		    set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
		    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
		    SSA_NAME_IS_DEFAULT_DEF (name) = 0;
		    gimple stmt = gimple_build_assign (name, new_decl);
		    gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		  }
		else
		  SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	      }
	}
    }

  struct modify_stmt_info info;
  info.adjustments = adjustments;

  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;

      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  info.stmt = stmt;
	  struct walk_stmt_info wi;

	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);

	  if (gimple_code (stmt) == GIMPLE_RETURN)
	    {
	      tree retval = gimple_return_retval (stmt);
	      if (!retval)
		{
		  gsi_remove (&gsi, true);
		  continue;
		}

	      /* Replace `return foo' with `retval_array[iter] = foo'.  */
	      tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
				 retval_array, iter, NULL, NULL);
	      stmt = gimple_build_assign (ref, retval);
	      gsi_replace (&gsi, stmt, true);
	      info.modified = true;
	    }

	  if (info.modified)
	    {
	      update_stmt (stmt);
	      if (maybe_clean_eh_stmt (stmt))
		gimple_purge_dead_eh_edges (gimple_bb (stmt));
	    }
	  gsi_next (&gsi);
	}
    }
}
/* Adjust the argument types in NODE to their appropriate vector
   counterparts.  */
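/* In outline, for a definition the adjusted clone ends up with this
   shape (a sketch only, assuming no inbranch mask):

     entry:
       <store vector args into their simd arrays>
     body:
       iter = PHI <0 (entry), iter2 (latch)>
       <original body, args and retval rewritten via simd arrays>
     incr:
       iter2 = iter + 1;
       if (iter2 < simdlen) goto latch; else goto new_exit;
     latch:
       goto body;
     new_exit:
       return <retval array, view-converted as needed>;  */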
static void
simd_clone_adjust (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  targetm.simd_clone.adjust (node);

  tree retval = simd_clone_adjust_return_type (node);
  ipa_parm_adjustment_vec adjustments
    = simd_clone_adjust_argument_types (node);

  push_gimplify_context ();

  gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);

  /* Adjust all uses of vector arguments accordingly.  Adjust all
     return values accordingly.  */
  tree iter = create_tmp_var (unsigned_type_node, "iter");
  tree iter1 = make_ssa_name (iter, NULL);
  tree iter2 = make_ssa_name (iter, NULL);
  ipa_simd_modify_function_body (node, adjustments, retval, iter1);

  /* Initialize the iteration variable.  */
  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  basic_block body_bb = split_block_after_labels (entry_bb)->dest;
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  /* Insert the SIMD array and iv initialization at function
     entry.  */
  gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);

  pop_gimplify_context (NULL);

  /* Create a new BB right before the original exit BB, to hold the
     iteration increment and the condition/branch.  */
  basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
  basic_block incr_bb = create_empty_bb (orig_exit);
  /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with no
     flags set.  Set it now to be a FALLTHRU_EDGE.  */
  gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
  EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
  for (unsigned i = 0;
       i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
    {
      edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
      redirect_edge_succ (e, incr_bb);
    }
  edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
  e->probability = REG_BR_PROB_BASE;
  gsi = gsi_last_bb (incr_bb);
  gimple g = gimple_build_assign_with_ops (PLUS_EXPR, iter2, iter1,
					   build_int_cst (unsigned_type_node,
							  1));
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

  /* Mostly annotate the loop for the vectorizer (the rest is done
     below).  */
  struct loop *loop = alloc_loop ();
  cfun->has_force_vectorize_loops = true;
  loop->safelen = node->simdclone->simdlen;
  loop->force_vectorize = true;
  loop->header = body_bb;
  add_bb_to_loop (incr_bb, loop);
  /* Branch around the body if the mask applies.  */
  if (node->simdclone->inbranch)
    {
      gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
      tree mask_array
	= node->simdclone->args[node->simdclone->nargs - 1].simd_array;
      tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)), NULL);
      tree aref = build4 (ARRAY_REF,
			  TREE_TYPE (TREE_TYPE (mask_array)),
			  mask_array, iter1,
			  NULL, NULL);
      g = gimple_build_assign (mask, aref);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
      if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
	{
	  aref = build1 (VIEW_CONVERT_EXPR,
			 build_nonstandard_integer_type (bitsize, 0), mask);
	  mask = make_ssa_name (TREE_TYPE (aref), NULL);
	  g = gimple_build_assign (mask, aref);
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	}

      g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
			     NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
      FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
    }
  /* Generate the condition.  */
  g = gimple_build_cond (LT_EXPR,
			 iter2,
			 build_int_cst (unsigned_type_node,
					node->simdclone->simdlen),
			 NULL, NULL);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
  e = split_block (incr_bb, gsi_stmt (gsi));
  basic_block latch_bb = e->dest;
  basic_block new_exit_bb = split_block (latch_bb, NULL)->dest;
  loop->latch = latch_bb;

  redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);

  make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
  /* The successor of incr_bb is already pointing to latch_bb; just
     change the flags.
     make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE);  */
  FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;

  gimple phi = create_phi_node (iter1, body_bb);
  edge preheader_edge = find_edge (entry_bb, body_bb);
  edge latch_edge = single_succ_edge (latch_bb);
  add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
	       UNKNOWN_LOCATION);
  add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);

  /* Generate the new return.  */
  gsi = gsi_last_bb (new_exit_bb);
  if (retval
      && TREE_CODE (retval) == VIEW_CONVERT_EXPR
      && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
    retval = TREE_OPERAND (retval, 0);
  else if (retval)
    {
      retval = build1 (VIEW_CONVERT_EXPR,
		       TREE_TYPE (TREE_TYPE (node->decl)),
		       retval);
      retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
					 false, GSI_CONTINUE_LINKING);
    }
  g = gimple_build_return (retval);
  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
  /* Handle aligned clauses by replacing default defs of the aligned
     uniform args with __builtin_assume_aligned (arg_N(D), alignment)
     lhs.  Handle linear by adding PHIs.  */
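  /* For instance, given "aligned (p : 32)" on a uniform pointer
     argument, uses of the default def p_1(D) are redirected through
       p_2 = __builtin_assume_aligned (p_1(D), 32);
     (SSA names illustrative), so the vectorizer may assume the
     alignment.  */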
  for (unsigned i = 0; i < node->simdclone->nargs; i++)
    if (node->simdclone->args[i].alignment
	&& node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	&& (node->simdclone->args[i].alignment
	    & (node->simdclone->args[i].alignment - 1)) == 0
	&& TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
	   == POINTER_TYPE)
      {
	unsigned int alignment = node->simdclone->args[i].alignment;
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	if (def && !has_zero_uses (def))
	  {
	    tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
	    gimple_seq seq = NULL;
	    bool need_cvt = false;
	    gimple call
	      = gimple_build_call (fn, 2, def, size_int (alignment));
	    g = call;
	    if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
					    ptr_type_node))
	      need_cvt = true;
	    tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg, NULL);
	    gimple_call_set_lhs (g, t);
	    gimple_seq_add_stmt_without_update (&seq, g);
	    if (need_cvt)
	      {
		t = make_ssa_name (orig_arg, NULL);
		g = gimple_build_assign_with_ops (NOP_EXPR, t,
						  gimple_call_lhs (g),
						  NULL_TREE);
		gimple_seq_add_stmt_without_update (&seq, g);
	      }
	    gsi_insert_seq_on_edge_immediate
	      (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);

	    entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	    int freq = compute_call_stmt_bb_frequency (current_function_decl,
						       entry_bb);
	    cgraph_create_edge (node, cgraph_get_create_node (fn),
				call, entry_bb->count, freq);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    tree repl = gimple_get_lhs (g);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (is_gimple_debug (use_stmt) || use_stmt == call)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, repl);
	  }
      }
    else if (node->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
      {
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
		    || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
	if (def && !has_zero_uses (def))
	  {
	    iter1 = make_ssa_name (orig_arg, NULL);
	    iter2 = make_ssa_name (orig_arg, NULL);
	    phi = create_phi_node (iter1, body_bb);
	    add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
	    add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
	    enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
				  ? PLUS_EXPR : POINTER_PLUS_EXPR;
	    tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
			   ? TREE_TYPE (orig_arg) : sizetype;
	    tree addcst
	      = build_int_cst (addtype, node->simdclone->args[i].linear_step);
	    g = gimple_build_assign_with_ops (code, iter2, iter1, addcst);
	    gsi = gsi_last_bb (incr_bb);
	    gsi_insert_before (&gsi, g, GSI_SAME_STMT);

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple use_stmt;
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (use_stmt == phi)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, iter1);
	  }
      }
  calculate_dominance_info (CDI_DOMINATORS);
  add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);

  pop_cfun ();
}
/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  */
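/* E.g. "#pragma omp declare simd notinbranch" on a definition of foo
   typically produces one clone per ISA variant requested by the
   target, with vector-ABI names along the lines of "_ZGVbN4v_foo" on
   x86 (illustrative; the exact string comes from simd_clone_mangle).  */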
static void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl));
  if (attr == NULL_TREE
      || node->global.inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;

  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, where we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
	= simd_clone_clauses_extract (node, TREE_VALUE (attr),
				      &inbranch_clause_specified);
      if (clone_info == NULL)
	continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
	 1 (just one ISA of simd clones should be created) or a higher
	 count of ISA variants.  In the latter case, clone_info is
	 initialized for the first ISA variant.  */
      int count
	= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
							  base_type, 0);
      if (count == 0)
	continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
	 also create one inbranch and one !inbranch clone of each.  */
      for (int i = 0; i < count * 2; i++)
	{
	  struct cgraph_simd_clone *clone = clone_info;
	  if (inbranch_clause_specified && (i & 1) != 0)
	    continue;

	  if (i != 0)
	    {
	      clone = simd_clone_struct_alloc (clone_info->nargs
					       + ((i & 1) != 0));
	      simd_clone_struct_copy (clone, clone_info);
	      /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
		 and simd_clone_adjust_argument_types did to the first
		 clone's info.  */
	      clone->nargs -= clone_info->inbranch;
	      clone->simdlen = orig_simdlen;
	      /* And call the target hook again to get the right ISA.  */
	      targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
							      base_type,
							      i / 2);
	      if ((i & 1) != 0)
		clone->inbranch = 1;
	    }

	  /* simd_clone_mangle might fail if such a clone has been created
	     already.  */
	  tree id = simd_clone_mangle (node, clone);
	  if (id == NULL_TREE)
	    continue;

	  /* Only when we are sure we want to create the clone do we
	     actually clone the function (for definitions) or create
	     another extern FUNCTION_DECL (for prototypes without
	     definitions).  */
	  struct cgraph_node *n = simd_clone_create (node);
	  if (n == NULL)
	    continue;

	  n->simdclone = clone;
	  clone->origin = node;
	  clone->next_clone = NULL;
	  if (node->simd_clones == NULL)
	    {
	      clone->prev_clone = n;
	      node->simd_clones = n;
	    }
	  else
	    {
	      clone->prev_clone = node->simd_clones->simdclone->prev_clone;
	      clone->prev_clone->simdclone->next_clone = n;
	      node->simd_clones->simdclone->prev_clone = n;
	    }
	  change_decl_assembler_name (n->decl, id);
	  /* And finally adjust the return type, the parameters and,
	     for definitions, also the function body.  */
	  if (node->definition)
	    simd_clone_adjust (n);
	  else
	    {
	      simd_clone_adjust_return_type (n);
	      simd_clone_adjust_argument_types (n);
	    }
	}
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}
/* Entry point for IPA simd clone creation pass.  */

static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}
namespace {

const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS,		/* type */
  "simdclone",			/* name */
  OPTGROUP_NONE,		/* optinfo_flags */
  true,				/* has_execute */
  TV_NONE,			/* tv_id */
  ( PROP_ssa | PROP_cfg ),	/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0,				/* todo_flags_finish */
};

class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone(gcc::context *ctxt)
    : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

bool
pass_omp_simd_clone::gate (function *)
{
  return ((flag_openmp || flag_openmp_simd
	   || flag_cilkplus
	   || (in_lto_p && !flag_wpa))
	  && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
}

} // anon namespace

simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}

#include "gt-omp-low.h"