/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "rtl.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
#include "cilk.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

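/* As a rough illustration (a sketch of the shape of the output, not the
   exact code GCC emits), a construct such as

	#pragma omp parallel shared (x)
	  x++;

   is outlined into a child function that receives the shared data
   through a generated struct, along the lines of:

	void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
	{
	  *.omp_data_i->x = *.omp_data_i->x + 1;
	}

	.omp_data_o.x = &x;
	GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);  */
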
/* Parallel region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
     barriers should jump during the omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
} omp_context;

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};

static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

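/* For example (a reader's note), callers below test for a clause with
     find_omp_clause (gimple_omp_for_clauses (stmt), OMP_CLAUSE_SCHEDULE)
   and treat a NULL_TREE result as "clause not present".  */
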
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD
		      || (gimple_omp_for_kind (for_stmt)
			  == GF_OMP_FOR_KIND_CILKFOR));
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}

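/* As a concrete illustration of the normalization above (a sketch, not
   actual compiler output), given

	#pragma omp for collapse (2)
	for (i = 0; i <= 9; i++)
	  for (j = 0; j < 8; j++)

   the i-loop condition is canonicalized from i <= 9 to i < 10, and since
   all bounds and steps are constants each loop contributes
   (step - 1 + n2 - n1) / step iterations, so the folded collapse count
   is 10 * 8 = 80; the collapsed loop then runs a single .iter variable
   from 0 below .count with step 1.  */
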
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (ws_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (ws_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}

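/* Reader's note (an assumption about the libgomp ABI in use here, not
   asserted by this function itself): the bound, step and chunk values
   pushed above line up with the extra parameters of the combined
   entry points such as GOMP_parallel_loop_dynamic, which take the loop
   start, end, increment and chunk size in addition to the usual
   parallel arguments.  */
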
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we assert
   that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

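/* For instance (illustrative only), given

	int x = 0;
	#pragma omp parallel shared (x)
	  foo (&x);

   X is TREE_ADDRESSABLE, so the test above forces it to be passed by
   pointer: every thread must see the one true location of X, and a
   private copy-in/copy-out slot would break that.  */
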
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
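/* MASK is a bitmask (this summary is inferred from the asserts and
   callers in this file, added here as a reader's note): bit 0 installs
   the field in CTX->RECORD_TYPE/FIELD_MAP, bit 1 in the task
   CTX->SRECORD_TYPE/SFIELD_MAP, and bit 2 adds an extra level of
   pointer indirection, as used for array-typed OMP_CLAUSE_MAP_POINTER
   mappings (mask value 7).  */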

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

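/* These DEBUG_FUNCTION entry points are intended for interactive use,
   e.g. from the debugger:

	(gdb) call debug_omp_region (region)
	(gdb) call debug_all_omp_regions ()  */
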
/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node::add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

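/* A case where this matters (an illustrative example; the temporary
   names below are hypothetical): for a VLA shared into a parallel
   region,

	void foo (int n)
	{
	  int a[n];
	#pragma omp parallel shared (a)
	  ...
	}

   the receiver field for A has a variably modified type like
   int[0:D.1234] whose bound refers to the parent's copy of the length;
   the loop above rebuilds the record so that bound is remapped to the
   child function's copy.  */
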
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_parallel_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
	    {
	      /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
		 #pragma omp target data, there is nothing to map for
		 those.  */
	      if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
		  && !POINTER_TYPE_P (TREE_TYPE (decl)))
		break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx);
		  if (gimple_omp_target_kind (ctx->stmt)
		      == GF_OMP_TARGET_KIND_REGION)
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  if (ctx->outer)
		    {
		      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		      decl = OMP_CLAUSE_DECL (c);
		    }
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && lookup_attribute ("omp declare target",
				   DECL_ATTRIBUTES (decl)))
	    break;
	  if (DECL_P (decl))
	    {
	      if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE__CILK_FOR_COUNT_:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	       && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for the omp child function.  Returns an identifier.
   If IS_CILK_FOR is true then the suffix for the child function is
   "_cilk_for_fn".  */

static tree
create_omp_child_function_name (bool task_copy, bool is_cilk_for)
{
  if (is_cilk_for)
    return clone_function_name (current_function_decl, "_cilk_for_fn");
  return clone_function_name (current_function_decl,
			      task_copy ? "_omp_cpyfn" : "_omp_fn");
}

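/* For a function foo, the generated names look like foo._omp_fn.0,
   foo._omp_cpyfn.1 or foo._cilk_for_fn.2, with the numeric suffix
   supplied by clone_function_name's counter.  */
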
/* Returns the type of the induction variable for the child function for
   _Cilk_for and the types for _high and _low variables based on TYPE.  */

static tree
cilk_for_check_loop_diff_type (tree type)
{
  if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
    {
      if (TYPE_UNSIGNED (type))
	return uint32_type_node;
      else
	return integer_type_node;
    }
  else
    {
      if (TYPE_UNSIGNED (type))
	return uint64_type_node;
      else
	return long_long_integer_type_node;
    }
}

1881 /* Build a decl for the omp child function. It'll not contain a body
1882 yet, just the bare decl. */
1884 static void
1885 create_omp_child_function (omp_context *ctx, bool task_copy)
1887 tree decl, type, name, t;
1889 tree cilk_for_count
1890 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
1891 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
1892 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
1893 tree cilk_var_type = NULL_TREE;
1895 name = create_omp_child_function_name (task_copy,
1896 cilk_for_count != NULL_TREE);
1897 if (task_copy)
1898 type = build_function_type_list (void_type_node, ptr_type_node,
1899 ptr_type_node, NULL_TREE);
1900 else if (cilk_for_count)
1902 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
1903 cilk_var_type = cilk_for_check_loop_diff_type (type);
1904 type = build_function_type_list (void_type_node, ptr_type_node,
1905 cilk_var_type, cilk_var_type, NULL_TREE);
1907 else
1908 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1910 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
1912 if (!task_copy)
1913 ctx->cb.dst_fn = decl;
1914 else
1915 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1917 TREE_STATIC (decl) = 1;
1918 TREE_USED (decl) = 1;
1919 DECL_ARTIFICIAL (decl) = 1;
1920 DECL_IGNORED_P (decl) = 0;
1921 TREE_PUBLIC (decl) = 0;
1922 DECL_UNINLINABLE (decl) = 1;
1923 DECL_EXTERNAL (decl) = 0;
1924 DECL_CONTEXT (decl) = NULL_TREE;
1925 DECL_INITIAL (decl) = make_node (BLOCK);
1926 bool target_p = false;
1927 if (lookup_attribute ("omp declare target",
1928 DECL_ATTRIBUTES (current_function_decl)))
1929 target_p = true;
1930 else
1932 omp_context *octx;
1933 for (octx = ctx; octx; octx = octx->outer)
1934 if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
1935 && gimple_omp_target_kind (octx->stmt)
1936 == GF_OMP_TARGET_KIND_REGION)
1938 target_p = true;
1939 break;
1942 if (target_p)
1943 DECL_ATTRIBUTES (decl)
1944 = tree_cons (get_identifier ("omp declare target"),
1945 NULL_TREE, DECL_ATTRIBUTES (decl));
1947 t = build_decl (DECL_SOURCE_LOCATION (decl),
1948 RESULT_DECL, NULL_TREE, void_type_node);
1949 DECL_ARTIFICIAL (t) = 1;
1950 DECL_IGNORED_P (t) = 1;
1951 DECL_CONTEXT (t) = decl;
1952 DECL_RESULT (decl) = t;
1954 /* _Cilk_for's child function requires two extra parameters called
1955 __low and __high that are set by the Cilk runtime when it calls this
1956 function. */
1957 if (cilk_for_count)
1959 t = build_decl (DECL_SOURCE_LOCATION (decl),
1960 PARM_DECL, get_identifier ("__high"), cilk_var_type);
1961 DECL_ARTIFICIAL (t) = 1;
1962 DECL_NAMELESS (t) = 1;
1963 DECL_ARG_TYPE (t) = ptr_type_node;
1964 DECL_CONTEXT (t) = current_function_decl;
1965 TREE_USED (t) = 1;
1966 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1967 DECL_ARGUMENTS (decl) = t;
1969 t = build_decl (DECL_SOURCE_LOCATION (decl),
1970 PARM_DECL, get_identifier ("__low"), cilk_var_type);
1971 DECL_ARTIFICIAL (t) = 1;
1972 DECL_NAMELESS (t) = 1;
1973 DECL_ARG_TYPE (t) = ptr_type_node;
1974 DECL_CONTEXT (t) = current_function_decl;
1975 TREE_USED (t) = 1;
1976 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1977 DECL_ARGUMENTS (decl) = t;
1980 tree data_name = get_identifier (".omp_data_i");
1981 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
1982 ptr_type_node);
1983 DECL_ARTIFICIAL (t) = 1;
1984 DECL_NAMELESS (t) = 1;
1985 DECL_ARG_TYPE (t) = ptr_type_node;
1986 DECL_CONTEXT (t) = current_function_decl;
1987 TREE_USED (t) = 1;
1988 if (cilk_for_count)
1989 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1990 DECL_ARGUMENTS (decl) = t;
1991 if (!task_copy)
1992 ctx->receiver_decl = t;
1993 else
1995 t = build_decl (DECL_SOURCE_LOCATION (decl),
1996 PARM_DECL, get_identifier (".omp_data_o"),
1997 ptr_type_node);
1998 DECL_ARTIFICIAL (t) = 1;
1999 DECL_NAMELESS (t) = 1;
2000 DECL_ARG_TYPE (t) = ptr_type_node;
2001 DECL_CONTEXT (t) = current_function_decl;
2002 TREE_USED (t) = 1;
2003 TREE_ADDRESSABLE (t) = 1;
2004 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2005 DECL_ARGUMENTS (decl) = t;
2008 /* Allocate memory for the function structure. The call to
2009 allocate_struct_function clobbers CFUN, so we need to restore
2010 it afterward. */
2011 push_struct_function (decl);
2012 cfun->function_end_locus = gimple_location (ctx->stmt);
2013 pop_cfun ();
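/* To illustrate, for a plain parallel region in function foo the decl
   built above corresponds roughly to (a sketch; the receiver record is
   finalized later by fixup_child_record_type):

     static void foo._omp_fn.0 (struct .omp_data_s * .omp_data_i);

   while a _Cilk_for child function also receives its chunk bounds:

     static void foo._cilk_for_fn.0 (void * .omp_data_i,
                                     <count type> __low,
                                     <count type> __high);  */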
2016 /* Callback for walk_gimple_seq. Check if a combined parallel
2017 contains an OMP_FOR with gimple_omp_for_combined_into_p set. */
2019 static tree
2020 find_combined_for (gimple_stmt_iterator *gsi_p,
2021 bool *handled_ops_p,
2022 struct walk_stmt_info *wi)
2024 gimple stmt = gsi_stmt (*gsi_p);
2026 *handled_ops_p = true;
2027 switch (gimple_code (stmt))
2029 WALK_SUBSTMTS;
2031 case GIMPLE_OMP_FOR:
2032 if (gimple_omp_for_combined_into_p (stmt)
2033 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2035 wi->info = stmt;
2036 return integer_zero_node;
2038 break;
2039 default:
2040 break;
2042 return NULL;
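/* For example, for the combined construct

     #pragma omp parallel for
     for (i = 0; i < n; i++)
       ...

   the GIMPLE_OMP_PARALLEL body contains a GIMPLE_OMP_FOR with
   gimple_omp_for_combined_into_p set; the walk stores that inner loop
   statement in WI->INFO and stops.  */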
2045 /* Scan an OpenMP parallel directive. */
2047 static void
2048 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2050 omp_context *ctx;
2051 tree name;
2052 gimple stmt = gsi_stmt (*gsi);
2054 /* Ignore parallel directives with empty bodies, unless there
2055 are copyin clauses. */
2056 if (optimize > 0
2057 && empty_body_p (gimple_omp_body (stmt))
2058 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2059 OMP_CLAUSE_COPYIN) == NULL)
2061 gsi_replace (gsi, gimple_build_nop (), false);
2062 return;
2065 if (gimple_omp_parallel_combined_p (stmt))
2067 gimple for_stmt;
2068 struct walk_stmt_info wi;
2070 memset (&wi, 0, sizeof (wi));
2071 wi.val_only = true;
2072 walk_gimple_seq (gimple_omp_body (stmt),
2073 find_combined_for, NULL, &wi);
2074 for_stmt = (gimple) wi.info;
2075 if (for_stmt)
2077 struct omp_for_data fd;
2078 extract_omp_for_data (for_stmt, &fd, NULL);
2079 /* We need two temporaries with fd.loop.v type (istart/iend)
2080 and then (fd.collapse - 1) temporaries with the same
2081 type for count2 ... countN-1 vars if not constant. */
2082 size_t count = 2, i;
2083 tree type = fd.iter_type;
2084 if (fd.collapse > 1
2085 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2086 count += fd.collapse - 1;
2087 for (i = 0; i < count; i++)
2089 tree temp = create_tmp_var (type, NULL);
2090 tree c = build_omp_clause (UNKNOWN_LOCATION,
2091 OMP_CLAUSE__LOOPTEMP_);
2092 insert_decl_map (&outer_ctx->cb, temp, temp);
2093 OMP_CLAUSE_DECL (c) = temp;
2094 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2095 gimple_omp_parallel_set_clauses (stmt, c);
2100 ctx = new_omp_context (stmt, outer_ctx);
2101 taskreg_contexts.safe_push (ctx);
2102 if (taskreg_nesting_level > 1)
2103 ctx->is_nested = true;
2104 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2105 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2106 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2107 name = create_tmp_var_name (".omp_data_s");
2108 name = build_decl (gimple_location (stmt),
2109 TYPE_DECL, name, ctx->record_type);
2110 DECL_ARTIFICIAL (name) = 1;
2111 DECL_NAMELESS (name) = 1;
2112 TYPE_NAME (ctx->record_type) = name;
2113 create_omp_child_function (ctx, false);
2114 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2116 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2117 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2119 if (TYPE_FIELDS (ctx->record_type) == NULL)
2120 ctx->record_type = ctx->receiver_decl = NULL;
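/* A sketch of the combined-parallel handling above: for

     #pragma omp parallel for

   two _looptemp_ clauses are prepended to the parallel, so after
   scanning the directive looks roughly like

     #pragma omp parallel _looptemp_ (D.1) _looptemp_ (D.2) ...

   (names illustrative); D.1 and D.2 later carry the computed istart/iend
   bounds from the parallel into the nested workshare loop.  */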
2123 /* Scan an OpenMP task directive. */
2125 static void
2126 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2128 omp_context *ctx;
2129 tree name, t;
2130 gimple stmt = gsi_stmt (*gsi);
2132 /* Ignore task directives with empty bodies. */
2133 if (optimize > 0
2134 && empty_body_p (gimple_omp_body (stmt)))
2136 gsi_replace (gsi, gimple_build_nop (), false);
2137 return;
2140 ctx = new_omp_context (stmt, outer_ctx);
2141 taskreg_contexts.safe_push (ctx);
2142 if (taskreg_nesting_level > 1)
2143 ctx->is_nested = true;
2144 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2145 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2146 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2147 name = create_tmp_var_name (".omp_data_s");
2148 name = build_decl (gimple_location (stmt),
2149 TYPE_DECL, name, ctx->record_type);
2150 DECL_ARTIFICIAL (name) = 1;
2151 DECL_NAMELESS (name) = 1;
2152 TYPE_NAME (ctx->record_type) = name;
2153 create_omp_child_function (ctx, false);
2154 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2156 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2158 if (ctx->srecord_type)
2160 name = create_tmp_var_name (".omp_data_a");
2161 name = build_decl (gimple_location (stmt),
2162 TYPE_DECL, name, ctx->srecord_type);
2163 DECL_ARTIFICIAL (name) = 1;
2164 DECL_NAMELESS (name) = 1;
2165 TYPE_NAME (ctx->srecord_type) = name;
2166 create_omp_child_function (ctx, true);
2169 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2171 if (TYPE_FIELDS (ctx->record_type) == NULL)
2173 ctx->record_type = ctx->receiver_decl = NULL;
2174 t = build_int_cst (long_integer_type_node, 0);
2175 gimple_omp_task_set_arg_size (stmt, t);
2176 t = build_int_cst (long_integer_type_node, 1);
2177 gimple_omp_task_set_arg_align (stmt, t);
2182 /* If any decls have been made addressable during scan_omp,
2183 adjust their fields if needed, and layout record types
2184 of parallel/task constructs. */
2186 static void
2187 finish_taskreg_scan (omp_context *ctx)
2189 if (ctx->record_type == NULL_TREE)
2190 return;
2192 /* If any task_shared_vars were needed, check each
2193 OMP_CLAUSE_SHARED clause on the GIMPLE_OMP_{PARALLEL,TASK}
2194 statement to see whether use_pointer_for_field has changed
2195 because of that; if it has, update the field types now. */
2196 if (task_shared_vars)
2198 tree c;
2200 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2201 c; c = OMP_CLAUSE_CHAIN (c))
2202 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2204 tree decl = OMP_CLAUSE_DECL (c);
2206 /* Global variables don't need to be copied,
2207 the receiver side will use them directly. */
2208 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2209 continue;
2210 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2211 || !use_pointer_for_field (decl, ctx))
2212 continue;
2213 tree field = lookup_field (decl, ctx);
2214 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2215 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2216 continue;
2217 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2218 TREE_THIS_VOLATILE (field) = 0;
2219 DECL_USER_ALIGN (field) = 0;
2220 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2221 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2222 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2223 if (ctx->srecord_type)
2225 tree sfield = lookup_sfield (decl, ctx);
2226 TREE_TYPE (sfield) = TREE_TYPE (field);
2227 TREE_THIS_VOLATILE (sfield) = 0;
2228 DECL_USER_ALIGN (sfield) = 0;
2229 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2230 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2231 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2236 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2238 layout_type (ctx->record_type);
2239 fixup_child_record_type (ctx);
2241 else
2243 location_t loc = gimple_location (ctx->stmt);
2244 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2245 /* Move VLA fields to the end. */
2246 p = &TYPE_FIELDS (ctx->record_type);
2247 while (*p)
2248 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2249 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2251 *q = *p;
2252 *p = TREE_CHAIN (*p);
2253 TREE_CHAIN (*q) = NULL_TREE;
2254 q = &TREE_CHAIN (*q);
2256 else
2257 p = &DECL_CHAIN (*p);
2258 *p = vla_fields;
2259 layout_type (ctx->record_type);
2260 fixup_child_record_type (ctx);
2261 if (ctx->srecord_type)
2262 layout_type (ctx->srecord_type);
2263 tree t = fold_convert_loc (loc, long_integer_type_node,
2264 TYPE_SIZE_UNIT (ctx->record_type));
2265 gimple_omp_task_set_arg_size (ctx->stmt, t);
2266 t = build_int_cst (long_integer_type_node,
2267 TYPE_ALIGN_UNIT (ctx->record_type));
2268 gimple_omp_task_set_arg_align (ctx->stmt, t);
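/* A sketch of the VLA reordering above, for a task such as

     void bar (int n)
     {
       int vla[n];
     #pragma omp task firstprivate (n, vla)
       ...
     }

   (names illustrative): the variable-sized vla field is moved behind the
   fixed-size n field so the constant-offset prefix of .omp_data_s can be
   laid out, after which arg_size and arg_align are taken from the
   laid-out record.  */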
2273 /* Scan an OpenMP loop directive. */
2275 static void
2276 scan_omp_for (gimple stmt, omp_context *outer_ctx)
2278 omp_context *ctx;
2279 size_t i;
2281 ctx = new_omp_context (stmt, outer_ctx);
2283 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2285 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2286 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2288 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2289 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2290 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2291 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2293 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2296 /* Scan an OpenMP sections directive. */
2298 static void
2299 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
2301 omp_context *ctx;
2303 ctx = new_omp_context (stmt, outer_ctx);
2304 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2305 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2308 /* Scan an OpenMP single directive. */
2310 static void
2311 scan_omp_single (gimple stmt, omp_context *outer_ctx)
2313 omp_context *ctx;
2314 tree name;
2316 ctx = new_omp_context (stmt, outer_ctx);
2317 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2318 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2319 name = create_tmp_var_name (".omp_copy_s");
2320 name = build_decl (gimple_location (stmt),
2321 TYPE_DECL, name, ctx->record_type);
2322 TYPE_NAME (ctx->record_type) = name;
2324 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2325 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2327 if (TYPE_FIELDS (ctx->record_type) == NULL)
2328 ctx->record_type = NULL;
2329 else
2330 layout_type (ctx->record_type);
2333 /* Scan an OpenMP target{, data, update} directive. */
2335 static void
2336 scan_omp_target (gimple stmt, omp_context *outer_ctx)
2338 omp_context *ctx;
2339 tree name;
2340 int kind = gimple_omp_target_kind (stmt);
2342 ctx = new_omp_context (stmt, outer_ctx);
2343 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2344 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2345 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2346 name = create_tmp_var_name (".omp_data_t");
2347 name = build_decl (gimple_location (stmt),
2348 TYPE_DECL, name, ctx->record_type);
2349 DECL_ARTIFICIAL (name) = 1;
2350 DECL_NAMELESS (name) = 1;
2351 TYPE_NAME (ctx->record_type) = name;
2352 if (kind == GF_OMP_TARGET_KIND_REGION)
2354 create_omp_child_function (ctx, false);
2355 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2358 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2359 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2361 if (TYPE_FIELDS (ctx->record_type) == NULL)
2362 ctx->record_type = ctx->receiver_decl = NULL;
2363 else
2365 TYPE_FIELDS (ctx->record_type)
2366 = nreverse (TYPE_FIELDS (ctx->record_type));
2367 #ifdef ENABLE_CHECKING
2368 tree field;
2369 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2370 for (field = TYPE_FIELDS (ctx->record_type);
2371 field;
2372 field = DECL_CHAIN (field))
2373 gcc_assert (DECL_ALIGN (field) == align);
2374 #endif
2375 layout_type (ctx->record_type);
2376 if (kind == GF_OMP_TARGET_KIND_REGION)
2377 fixup_child_record_type (ctx);
2381 /* Scan an OpenMP teams directive. */
2383 static void
2384 scan_omp_teams (gimple stmt, omp_context *outer_ctx)
2386 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2387 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2388 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2391 /* Check OpenMP nesting restrictions. */
2392 static bool
2393 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2395 if (ctx != NULL)
2397 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2398 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2400 error_at (gimple_location (stmt),
2401 "OpenMP constructs may not be nested inside simd region");
2402 return false;
2404 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2406 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2407 || (gimple_omp_for_kind (stmt)
2408 != GF_OMP_FOR_KIND_DISTRIBUTE))
2409 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2411 error_at (gimple_location (stmt),
2412 "only distribute or parallel constructs are allowed to "
2413 "be closely nested inside teams construct");
2414 return false;
2418 switch (gimple_code (stmt))
2420 case GIMPLE_OMP_FOR:
2421 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2422 return true;
2423 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2425 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2427 error_at (gimple_location (stmt),
2428 "distribute construct must be closely nested inside "
2429 "teams construct");
2430 return false;
2432 return true;
2434 /* FALLTHRU */
2435 case GIMPLE_CALL:
2436 if (is_gimple_call (stmt)
2437 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2438 == BUILT_IN_GOMP_CANCEL
2439 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2440 == BUILT_IN_GOMP_CANCELLATION_POINT))
2442 const char *bad = NULL;
2443 const char *kind = NULL;
2444 if (ctx == NULL)
2446 error_at (gimple_location (stmt), "orphaned %qs construct",
2447 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2448 == BUILT_IN_GOMP_CANCEL
2449 ? "#pragma omp cancel"
2450 : "#pragma omp cancellation point");
2451 return false;
2453 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2454 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2455 : 0)
2457 case 1:
2458 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2459 bad = "#pragma omp parallel";
2460 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2461 == BUILT_IN_GOMP_CANCEL
2462 && !integer_zerop (gimple_call_arg (stmt, 1)))
2463 ctx->cancellable = true;
2464 kind = "parallel";
2465 break;
2466 case 2:
2467 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2468 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2469 bad = "#pragma omp for";
2470 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2471 == BUILT_IN_GOMP_CANCEL
2472 && !integer_zerop (gimple_call_arg (stmt, 1)))
2474 ctx->cancellable = true;
2475 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2476 OMP_CLAUSE_NOWAIT))
2477 warning_at (gimple_location (stmt), 0,
2478 "%<#pragma omp cancel for%> inside "
2479 "%<nowait%> for construct");
2480 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2481 OMP_CLAUSE_ORDERED))
2482 warning_at (gimple_location (stmt), 0,
2483 "%<#pragma omp cancel for%> inside "
2484 "%<ordered%> for construct");
2486 kind = "for";
2487 break;
2488 case 4:
2489 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2490 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2491 bad = "#pragma omp sections";
2492 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2493 == BUILT_IN_GOMP_CANCEL
2494 && !integer_zerop (gimple_call_arg (stmt, 1)))
2496 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2498 ctx->cancellable = true;
2499 if (find_omp_clause (gimple_omp_sections_clauses
2500 (ctx->stmt),
2501 OMP_CLAUSE_NOWAIT))
2502 warning_at (gimple_location (stmt), 0,
2503 "%<#pragma omp cancel sections%> inside "
2504 "%<nowait%> sections construct");
2506 else
2508 gcc_assert (ctx->outer
2509 && gimple_code (ctx->outer->stmt)
2510 == GIMPLE_OMP_SECTIONS);
2511 ctx->outer->cancellable = true;
2512 if (find_omp_clause (gimple_omp_sections_clauses
2513 (ctx->outer->stmt),
2514 OMP_CLAUSE_NOWAIT))
2515 warning_at (gimple_location (stmt), 0,
2516 "%<#pragma omp cancel sections%> inside "
2517 "%<nowait%> sections construct");
2520 kind = "sections";
2521 break;
2522 case 8:
2523 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2524 bad = "#pragma omp task";
2525 else
2526 ctx->cancellable = true;
2527 kind = "taskgroup";
2528 break;
2529 default:
2530 error_at (gimple_location (stmt), "invalid arguments");
2531 return false;
2533 if (bad)
2535 error_at (gimple_location (stmt),
2536 "%<%s %s%> construct not closely nested inside of %qs",
2537 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2538 == BUILT_IN_GOMP_CANCEL
2539 ? "#pragma omp cancel"
2540 : "#pragma omp cancellation point", kind, bad);
2541 return false;
2544 /* FALLTHRU */
2545 case GIMPLE_OMP_SECTIONS:
2546 case GIMPLE_OMP_SINGLE:
2547 for (; ctx != NULL; ctx = ctx->outer)
2548 switch (gimple_code (ctx->stmt))
2550 case GIMPLE_OMP_FOR:
2551 case GIMPLE_OMP_SECTIONS:
2552 case GIMPLE_OMP_SINGLE:
2553 case GIMPLE_OMP_ORDERED:
2554 case GIMPLE_OMP_MASTER:
2555 case GIMPLE_OMP_TASK:
2556 case GIMPLE_OMP_CRITICAL:
2557 if (is_gimple_call (stmt))
2559 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2560 != BUILT_IN_GOMP_BARRIER)
2561 return true;
2562 error_at (gimple_location (stmt),
2563 "barrier region may not be closely nested inside "
2564 "of work-sharing, critical, ordered, master or "
2565 "explicit task region");
2566 return false;
2568 error_at (gimple_location (stmt),
2569 "work-sharing region may not be closely nested inside "
2570 "of work-sharing, critical, ordered, master or explicit "
2571 "task region");
2572 return false;
2573 case GIMPLE_OMP_PARALLEL:
2574 return true;
2575 default:
2576 break;
2578 break;
2579 case GIMPLE_OMP_MASTER:
2580 for (; ctx != NULL; ctx = ctx->outer)
2581 switch (gimple_code (ctx->stmt))
2583 case GIMPLE_OMP_FOR:
2584 case GIMPLE_OMP_SECTIONS:
2585 case GIMPLE_OMP_SINGLE:
2586 case GIMPLE_OMP_TASK:
2587 error_at (gimple_location (stmt),
2588 "master region may not be closely nested inside "
2589 "of work-sharing or explicit task region");
2590 return false;
2591 case GIMPLE_OMP_PARALLEL:
2592 return true;
2593 default:
2594 break;
2596 break;
2597 case GIMPLE_OMP_ORDERED:
2598 for (; ctx != NULL; ctx = ctx->outer)
2599 switch (gimple_code (ctx->stmt))
2601 case GIMPLE_OMP_CRITICAL:
2602 case GIMPLE_OMP_TASK:
2603 error_at (gimple_location (stmt),
2604 "ordered region may not be closely nested inside "
2605 "of critical or explicit task region");
2606 return false;
2607 case GIMPLE_OMP_FOR:
2608 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2609 OMP_CLAUSE_ORDERED) == NULL)
2611 error_at (gimple_location (stmt),
2612 "ordered region must be closely nested inside "
2613 "a loop region with an ordered clause");
2614 return false;
2616 return true;
2617 case GIMPLE_OMP_PARALLEL:
2618 error_at (gimple_location (stmt),
2619 "ordered region must be closely nested inside "
2620 "a loop region with an ordered clause");
2621 return false;
2622 default:
2623 break;
2625 break;
2626 case GIMPLE_OMP_CRITICAL:
2627 for (; ctx != NULL; ctx = ctx->outer)
2628 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
2629 && (gimple_omp_critical_name (stmt)
2630 == gimple_omp_critical_name (ctx->stmt)))
2632 error_at (gimple_location (stmt),
2633 "critical region may not be nested inside a critical "
2634 "region with the same name");
2635 return false;
2637 break;
2638 case GIMPLE_OMP_TEAMS:
2639 if (ctx == NULL
2640 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2641 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2643 error_at (gimple_location (stmt),
2644 "teams construct not closely nested inside of target "
2645 "region");
2646 return false;
2648 break;
2649 case GIMPLE_OMP_TARGET:
2650 for (; ctx != NULL; ctx = ctx->outer)
2651 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
2652 && gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_REGION)
2654 const char *name;
2655 switch (gimple_omp_target_kind (stmt))
2657 case GF_OMP_TARGET_KIND_REGION: name = "target"; break;
2658 case GF_OMP_TARGET_KIND_DATA: name = "target data"; break;
2659 case GF_OMP_TARGET_KIND_UPDATE: name = "target update"; break;
2660 default: gcc_unreachable ();
2662 warning_at (gimple_location (stmt), 0,
2663 "%s construct inside of target region", name);
2665 break;
2666 default:
2667 break;
2669 return true;
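/* For example, the checks above diagnose invalid nestings such as

     #pragma omp teams
     #pragma omp single       // error: only distribute or parallel
       ;                      // may be closely nested in teams

   or

     #pragma omp critical
     #pragma omp ordered      // error: ordered may not be closely
       ;                      // nested inside critical

   while a target construct inside a target region only gets a
   warning.  */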
2673 /* Helper function for scan_omp.
2675 Callback for walk_tree or operators in walk_gimple_stmt used to
2676 scan for OpenMP directives in TP. */
2678 static tree
2679 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2681 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2682 omp_context *ctx = (omp_context *) wi->info;
2683 tree t = *tp;
2685 switch (TREE_CODE (t))
2687 case VAR_DECL:
2688 case PARM_DECL:
2689 case LABEL_DECL:
2690 case RESULT_DECL:
2691 if (ctx)
2692 *tp = remap_decl (t, &ctx->cb);
2693 break;
2695 default:
2696 if (ctx && TYPE_P (t))
2697 *tp = remap_type (t, &ctx->cb);
2698 else if (!DECL_P (t))
2700 *walk_subtrees = 1;
2701 if (ctx)
2703 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2704 if (tem != TREE_TYPE (t))
2706 if (TREE_CODE (t) == INTEGER_CST)
2707 *tp = wide_int_to_tree (tem, t);
2708 else
2709 TREE_TYPE (t) = tem;
2713 break;
2716 return NULL_TREE;
2719 /* Return true if FNDECL is a setjmp or a longjmp. */
2721 static bool
2722 setjmp_or_longjmp_p (const_tree fndecl)
2724 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2725 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2726 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2727 return true;
2729 tree declname = DECL_NAME (fndecl);
2730 if (!declname)
2731 return false;
2732 const char *name = IDENTIFIER_POINTER (declname);
2733 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2737 /* Helper function for scan_omp.
2739 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2740 the current statement in GSI. */
2742 static tree
2743 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2744 struct walk_stmt_info *wi)
2746 gimple stmt = gsi_stmt (*gsi);
2747 omp_context *ctx = (omp_context *) wi->info;
2749 if (gimple_has_location (stmt))
2750 input_location = gimple_location (stmt);
2752 /* Check the OpenMP nesting restrictions. */
2753 bool remove = false;
2754 if (is_gimple_omp (stmt))
2755 remove = !check_omp_nesting_restrictions (stmt, ctx);
2756 else if (is_gimple_call (stmt))
2758 tree fndecl = gimple_call_fndecl (stmt);
2759 if (fndecl)
2761 if (setjmp_or_longjmp_p (fndecl)
2762 && ctx
2763 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2764 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2766 remove = true;
2767 error_at (gimple_location (stmt),
2768 "setjmp/longjmp inside simd construct");
2770 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2771 switch (DECL_FUNCTION_CODE (fndecl))
2773 case BUILT_IN_GOMP_BARRIER:
2774 case BUILT_IN_GOMP_CANCEL:
2775 case BUILT_IN_GOMP_CANCELLATION_POINT:
2776 case BUILT_IN_GOMP_TASKYIELD:
2777 case BUILT_IN_GOMP_TASKWAIT:
2778 case BUILT_IN_GOMP_TASKGROUP_START:
2779 case BUILT_IN_GOMP_TASKGROUP_END:
2780 remove = !check_omp_nesting_restrictions (stmt, ctx);
2781 break;
2782 default:
2783 break;
2787 if (remove)
2789 stmt = gimple_build_nop ();
2790 gsi_replace (gsi, stmt, false);
2793 *handled_ops_p = true;
2795 switch (gimple_code (stmt))
2797 case GIMPLE_OMP_PARALLEL:
2798 taskreg_nesting_level++;
2799 scan_omp_parallel (gsi, ctx);
2800 taskreg_nesting_level--;
2801 break;
2803 case GIMPLE_OMP_TASK:
2804 taskreg_nesting_level++;
2805 scan_omp_task (gsi, ctx);
2806 taskreg_nesting_level--;
2807 break;
2809 case GIMPLE_OMP_FOR:
2810 scan_omp_for (stmt, ctx);
2811 break;
2813 case GIMPLE_OMP_SECTIONS:
2814 scan_omp_sections (stmt, ctx);
2815 break;
2817 case GIMPLE_OMP_SINGLE:
2818 scan_omp_single (stmt, ctx);
2819 break;
2821 case GIMPLE_OMP_SECTION:
2822 case GIMPLE_OMP_MASTER:
2823 case GIMPLE_OMP_TASKGROUP:
2824 case GIMPLE_OMP_ORDERED:
2825 case GIMPLE_OMP_CRITICAL:
2826 ctx = new_omp_context (stmt, ctx);
2827 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2828 break;
2830 case GIMPLE_OMP_TARGET:
2831 scan_omp_target (stmt, ctx);
2832 break;
2834 case GIMPLE_OMP_TEAMS:
2835 scan_omp_teams (stmt, ctx);
2836 break;
2838 case GIMPLE_BIND:
2840 tree var;
2842 *handled_ops_p = false;
2843 if (ctx)
2844 for (var = gimple_bind_vars (as_a <gimple_bind> (stmt));
2845 var ;
2846 var = DECL_CHAIN (var))
2847 insert_decl_map (&ctx->cb, var, var);
2849 break;
2850 default:
2851 *handled_ops_p = false;
2852 break;
2855 return NULL_TREE;
2859 /* Scan all the statements starting at the current statement. CTX
2860 contains context information about the OpenMP directives and
2861 clauses found during the scan. */
2863 static void
2864 scan_omp (gimple_seq *body_p, omp_context *ctx)
2866 location_t saved_location;
2867 struct walk_stmt_info wi;
2869 memset (&wi, 0, sizeof (wi));
2870 wi.info = ctx;
2871 wi.want_locations = true;
2873 saved_location = input_location;
2874 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2875 input_location = saved_location;
2878 /* Re-gimplification and code generation routines. */
2880 /* Build a call to GOMP_barrier. */
2882 static gimple
2883 build_omp_barrier (tree lhs)
2885 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2886 : BUILT_IN_GOMP_BARRIER);
2887 gimple_call g = gimple_build_call (fndecl, 0);
2888 if (lhs)
2889 gimple_call_set_lhs (g, lhs);
2890 return g;
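/* The two forms built here, as a sketch (the lhs name is illustrative):

     GOMP_barrier ();                      // LHS == NULL_TREE
     cancelled = GOMP_barrier_cancel ();   // LHS != NULL; the lhs
                                           // receives the cancellation
                                           // flag  */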
2893 /* If a context was created for STMT when it was scanned, return it. */
2895 static omp_context *
2896 maybe_lookup_ctx (gimple stmt)
2898 splay_tree_node n;
2899 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2900 return n ? (omp_context *) n->value : NULL;
2904 /* Find the mapping for DECL in CTX or the immediately enclosing
2905 context that has a mapping for DECL.
2907 If CTX is a nested parallel directive, we may have to use the decl
2908 mappings created in CTX's parent context. Suppose that we have the
2909 following parallel nesting (variable UIDs shown for clarity):
2911 iD.1562 = 0;
2912 #omp parallel shared(iD.1562) -> outer parallel
2913 iD.1562 = iD.1562 + 1;
2915 #omp parallel shared (iD.1562) -> inner parallel
2916 iD.1562 = iD.1562 - 1;
2918 Each parallel structure will create a distinct .omp_data_s structure
2919 for copying iD.1562 in/out of the directive:
2921 outer parallel .omp_data_s.1.i -> iD.1562
2922 inner parallel .omp_data_s.2.i -> iD.1562
2924 A shared variable mapping will produce a copy-out operation before
2925 the parallel directive and a copy-in operation after it. So, in
2926 this case we would have:
2928 iD.1562 = 0;
2929 .omp_data_o.1.i = iD.1562;
2930 #omp parallel shared(iD.1562) -> outer parallel
2931 .omp_data_i.1 = &.omp_data_o.1
2932 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2934 .omp_data_o.2.i = iD.1562; -> **
2935 #omp parallel shared(iD.1562) -> inner parallel
2936 .omp_data_i.2 = &.omp_data_o.2
2937 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2940 ** This is a problem. The symbol iD.1562 cannot be referenced
2941 inside the body of the outer parallel region. But since we are
2942 emitting this copy operation while expanding the inner parallel
2943 directive, we need to access the CTX structure of the outer
2944 parallel directive to get the correct mapping:
2946 .omp_data_o.2.i = .omp_data_i.1->i
2948 Since there may be other workshare or parallel directives enclosing
2949 the parallel directive, it may be necessary to walk up the context
2950 parent chain. This is not a problem in general because nested
2951 parallelism happens only rarely. */
2953 static tree
2954 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2956 tree t;
2957 omp_context *up;
2959 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2960 t = maybe_lookup_decl (decl, up);
2962 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2964 return t ? t : decl;
2968 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2969 in outer contexts. */
2971 static tree
2972 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2974 tree t = NULL;
2975 omp_context *up;
2977 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2978 t = maybe_lookup_decl (decl, up);
2980 return t ? t : decl;
2984 /* Construct the initialization value for reduction CLAUSE. */
2986 tree
2987 omp_reduction_init (tree clause, tree type)
2989 location_t loc = OMP_CLAUSE_LOCATION (clause);
2990 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2992 case PLUS_EXPR:
2993 case MINUS_EXPR:
2994 case BIT_IOR_EXPR:
2995 case BIT_XOR_EXPR:
2996 case TRUTH_OR_EXPR:
2997 case TRUTH_ORIF_EXPR:
2998 case TRUTH_XOR_EXPR:
2999 case NE_EXPR:
3000 return build_zero_cst (type);
3002 case MULT_EXPR:
3003 case TRUTH_AND_EXPR:
3004 case TRUTH_ANDIF_EXPR:
3005 case EQ_EXPR:
3006 return fold_convert_loc (loc, type, integer_one_node);
3008 case BIT_AND_EXPR:
3009 return fold_convert_loc (loc, type, integer_minus_one_node);
3011 case MAX_EXPR:
3012 if (SCALAR_FLOAT_TYPE_P (type))
3014 REAL_VALUE_TYPE max, min;
3015 if (HONOR_INFINITIES (TYPE_MODE (type)))
3017 real_inf (&max);
3018 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3020 else
3021 real_maxval (&min, 1, TYPE_MODE (type));
3022 return build_real (type, min);
3024 else
3026 gcc_assert (INTEGRAL_TYPE_P (type));
3027 return TYPE_MIN_VALUE (type);
3030 case MIN_EXPR:
3031 if (SCALAR_FLOAT_TYPE_P (type))
3033 REAL_VALUE_TYPE max;
3034 if (HONOR_INFINITIES (TYPE_MODE (type)))
3035 real_inf (&max);
3036 else
3037 real_maxval (&max, 0, TYPE_MODE (type));
3038 return build_real (type, max);
3040 else
3042 gcc_assert (INTEGRAL_TYPE_P (type));
3043 return TYPE_MAX_VALUE (type);
3046 default:
3047 gcc_unreachable ();
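/* For example: reduction (+:x), reduction (-:x), reduction (|:x) and
   reduction (^:x) initialize the private copy to 0; reduction (*:x)
   to 1; reduction (&:x) to ~0; reduction (max:x) to the minimum value
   of TYPE (or -inf when the type honors infinities), and reduction
   (min:x) correspondingly to the maximum.  */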
3051 /* Return alignment to be assumed for var in CLAUSE, which should be
3052 OMP_CLAUSE_ALIGNED. */
3054 static tree
3055 omp_clause_aligned_alignment (tree clause)
3057 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3058 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3060 /* Otherwise return the implementation-defined alignment. */
3061 unsigned int al = 1;
3062 enum machine_mode mode, vmode;
3063 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3064 if (vs)
3065 vs = 1 << floor_log2 (vs);
3066 static enum mode_class classes[]
3067 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3068 for (int i = 0; i < 4; i += 2)
3069 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3070 mode != VOIDmode;
3071 mode = GET_MODE_WIDER_MODE (mode))
3073 vmode = targetm.vectorize.preferred_simd_mode (mode);
3074 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3075 continue;
3076 while (vs
3077 && GET_MODE_SIZE (vmode) < vs
3078 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3079 vmode = GET_MODE_2XWIDER_MODE (vmode);
3081 tree type = lang_hooks.types.type_for_mode (mode, 1);
3082 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3083 continue;
3084 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3085 / GET_MODE_SIZE (mode));
3086 if (TYPE_MODE (type) != vmode)
3087 continue;
3088 if (TYPE_ALIGN_UNIT (type) > al)
3089 al = TYPE_ALIGN_UNIT (type);
3091 return build_int_cst (integer_type_node, al);
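/* E.g. for "#pragma omp simd aligned (p : 32)" this returns 32, while
   for plain "aligned (p)" it returns the alignment of the widest
   supported vector type, say 32 bytes on a hypothetical target whose
   preferred SIMD modes are 256 bits wide.  */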
3094 /* Return maximum possible vectorization factor for the target. */
3096 static int
3097 omp_max_vf (void)
3099 if (!optimize
3100 || optimize_debug
3101 || !flag_tree_loop_optimize
3102 || (!flag_tree_loop_vectorize
3103 && (global_options_set.x_flag_tree_loop_vectorize
3104 || global_options_set.x_flag_tree_vectorize)))
3105 return 1;
3107 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3108 if (vs)
3110 vs = 1 << floor_log2 (vs);
3111 return vs;
3113 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3114 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3115 return GET_MODE_NUNITS (vqimode);
3116 return 1;
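/* For instance, on a hypothetical target whose
   autovectorize_vector_sizes hook returns 32 | 16 (256-bit and 128-bit
   vectors), vs becomes 32, the widest vector size in bytes; since no
   element is smaller than one byte, that is a safe upper bound on the
   number of lanes.  */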
3119 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3120 privatization. */
3122 static bool
3123 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3124 tree &idx, tree &lane, tree &ivar, tree &lvar)
3126 if (max_vf == 0)
3128 max_vf = omp_max_vf ();
3129 if (max_vf > 1)
3131 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3132 OMP_CLAUSE_SAFELEN);
3133 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3134 max_vf = 1;
3135 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3136 max_vf) == -1)
3137 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3139 if (max_vf > 1)
3141 idx = create_tmp_var (unsigned_type_node, NULL);
3142 lane = create_tmp_var (unsigned_type_node, NULL);
3145 if (max_vf == 1)
3146 return false;
3148 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3149 tree avar = create_tmp_var_raw (atype, NULL);
3150 if (TREE_ADDRESSABLE (new_var))
3151 TREE_ADDRESSABLE (avar) = 1;
3152 DECL_ATTRIBUTES (avar)
3153 = tree_cons (get_identifier ("omp simd array"), NULL,
3154 DECL_ATTRIBUTES (avar));
3155 gimple_add_tmp_var (avar);
3156 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3157 NULL_TREE, NULL_TREE);
3158 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3159 NULL_TREE, NULL_TREE);
3160 if (DECL_P (new_var))
3162 SET_DECL_VALUE_EXPR (new_var, lvar);
3163 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3165 return true;
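/* A sketch of the privatization this enables: for

     #pragma omp simd private (x)

   the scalar x is backed by an "omp simd array"

     <type of x> x_arr[max_vf];

   (name illustrative).  IVAR and LVAR are the x_arr[idx] and
   x_arr[lane] element references built above; x's DECL_VALUE_EXPR is
   pointed at the lane element, so each SIMD lane gets a private copy
   that the vectorizer can later map onto a vector register.  */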
3168 /* Helper function of lower_rec_input_clauses. For a reference used
3169 in a simd reduction, add an underlying variable that it will reference. */
3171 static void
3172 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3174 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3175 if (TREE_CONSTANT (z))
3177 const char *name = NULL;
3178 if (DECL_NAME (new_vard))
3179 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3181 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3182 gimple_add_tmp_var (z);
3183 TREE_ADDRESSABLE (z) = 1;
3184 z = build_fold_addr_expr_loc (loc, z);
3185 gimplify_assign (new_vard, z, ilist);
3189 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3190 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3191 private variables. Initialization statements go in ILIST, while calls
3192 to destructors go in DLIST. */
3194 static void
3195 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3196 omp_context *ctx, struct omp_for_data *fd)
3198 tree c, dtor, copyin_seq, x, ptr;
3199 bool copyin_by_ref = false;
3200 bool lastprivate_firstprivate = false;
3201 bool reduction_omp_orig_ref = false;
3202 int pass;
3203 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3204 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3205 int max_vf = 0;
3206 tree lane = NULL_TREE, idx = NULL_TREE;
3207 tree ivar = NULL_TREE, lvar = NULL_TREE;
3208 gimple_seq llist[2] = { NULL, NULL };
3210 copyin_seq = NULL;
3212 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3213 with data sharing clauses referencing variable sized vars. That
3214 is unnecessarily hard to support and very unlikely to result in
3215 vectorized code anyway. */
3216 if (is_simd)
3217 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3218 switch (OMP_CLAUSE_CODE (c))
3220 case OMP_CLAUSE_LINEAR:
3221 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3222 max_vf = 1;
3223 /* FALLTHRU */
3224 case OMP_CLAUSE_REDUCTION:
3225 case OMP_CLAUSE_PRIVATE:
3226 case OMP_CLAUSE_FIRSTPRIVATE:
3227 case OMP_CLAUSE_LASTPRIVATE:
3228 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3229 max_vf = 1;
3230 break;
3231 default:
3232 continue;
3235 /* Do all the fixed-size types in the first pass, and the variable-size
3236 types in the second pass. This makes sure that the scalar arguments to
3237 the variable-size types are processed before we use them in the
3238 variable-size operations. */
3239 for (pass = 0; pass < 2; ++pass)
3241 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3243 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3244 tree var, new_var;
3245 bool by_ref;
3246 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3248 switch (c_kind)
3250 case OMP_CLAUSE_PRIVATE:
3251 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3252 continue;
3253 break;
3254 case OMP_CLAUSE_SHARED:
3255 /* Ignore shared directives in teams construct. */
3256 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3257 continue;
3258 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3260 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3261 continue;
3263 case OMP_CLAUSE_FIRSTPRIVATE:
3264 case OMP_CLAUSE_COPYIN:
3265 case OMP_CLAUSE_LINEAR:
3266 break;
3267 case OMP_CLAUSE_REDUCTION:
3268 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3269 reduction_omp_orig_ref = true;
3270 break;
3271 case OMP_CLAUSE__LOOPTEMP_:
3272 /* Handle _looptemp_ clauses only on parallel. */
3273 if (fd)
3274 continue;
3275 break;
3276 case OMP_CLAUSE_LASTPRIVATE:
3277 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3279 lastprivate_firstprivate = true;
3280 if (pass != 0)
3281 continue;
3283 /* Even without a corresponding firstprivate, if the
3284 decl is Fortran allocatable, it needs an outer var
3285 reference. */
3286 else if (pass == 0
3287 && lang_hooks.decls.omp_private_outer_ref
3288 (OMP_CLAUSE_DECL (c)))
3289 lastprivate_firstprivate = true;
3290 break;
3291 case OMP_CLAUSE_ALIGNED:
3292 if (pass == 0)
3293 continue;
3294 var = OMP_CLAUSE_DECL (c);
3295 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3296 && !is_global_var (var))
3298 new_var = maybe_lookup_decl (var, ctx);
3299 if (new_var == NULL_TREE)
3300 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3301 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3302 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3303 omp_clause_aligned_alignment (c));
3304 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3305 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3306 gimplify_and_add (x, ilist);
3308 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3309 && is_global_var (var))
3311 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3312 new_var = lookup_decl (var, ctx);
3313 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3314 t = build_fold_addr_expr_loc (clause_loc, t);
3315 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3316 t = build_call_expr_loc (clause_loc, t2, 2, t,
3317 omp_clause_aligned_alignment (c));
3318 t = fold_convert_loc (clause_loc, ptype, t);
3319 x = create_tmp_var (ptype, NULL);
3320 t = build2 (MODIFY_EXPR, ptype, x, t);
3321 gimplify_and_add (t, ilist);
3322 t = build_simple_mem_ref_loc (clause_loc, x);
3323 SET_DECL_VALUE_EXPR (new_var, t);
3324 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3326 continue;
3327 default:
3328 continue;
3331 new_var = var = OMP_CLAUSE_DECL (c);
3332 if (c_kind != OMP_CLAUSE_COPYIN)
3333 new_var = lookup_decl (var, ctx);
3335 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3337 if (pass != 0)
3338 continue;
3340 else if (is_variable_sized (var))
3342 /* For variable sized types, we need to allocate the
3343 actual storage here. Call alloca and store the
3344 result in the pointer decl that we created elsewhere. */
3345 if (pass == 0)
3346 continue;
3348 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3350 gimple_call stmt;
3351 tree tmp, atmp;
3353 ptr = DECL_VALUE_EXPR (new_var);
3354 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3355 ptr = TREE_OPERAND (ptr, 0);
3356 gcc_assert (DECL_P (ptr));
3357 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3359 /* void *tmp = __builtin_alloca (x); */
3360 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3361 stmt = gimple_build_call (atmp, 1, x);
3362 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3363 gimple_add_tmp_var (tmp);
3364 gimple_call_set_lhs (stmt, tmp);
3366 gimple_seq_add_stmt (ilist, stmt);
3368 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3369 gimplify_assign (ptr, x, ilist);
3372 else if (is_reference (var))
3374 /* For references that are being privatized for Fortran,
3375 allocate new backing storage for the new pointer
3376 variable. This allows us to avoid changing all the
3377 code that expects a pointer to something that expects
3378 a direct variable. */
3379 if (pass == 0)
3380 continue;
3382 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3383 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3385 x = build_receiver_ref (var, false, ctx);
3386 x = build_fold_addr_expr_loc (clause_loc, x);
3388 else if (TREE_CONSTANT (x))
3390 /* For a reduction in a SIMD loop, defer adding the
3391 initialization of the reference, because if we decide
3392 to use a SIMD array for it, the initialization could cause
3393 an ICE during expansion. */
3394 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3395 x = NULL_TREE;
3396 else
3398 const char *name = NULL;
3399 if (DECL_NAME (new_var))
3400 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3402 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3403 name);
3404 gimple_add_tmp_var (x);
3405 TREE_ADDRESSABLE (x) = 1;
3406 x = build_fold_addr_expr_loc (clause_loc, x);
3409 else
3411 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3412 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3415 if (x)
3417 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3418 gimplify_assign (new_var, x, ilist);
3421 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3423 else if (c_kind == OMP_CLAUSE_REDUCTION
3424 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3426 if (pass == 0)
3427 continue;
3429 else if (pass != 0)
3430 continue;
3432 switch (OMP_CLAUSE_CODE (c))
3434 case OMP_CLAUSE_SHARED:
3435 /* Ignore shared directives in teams construct. */
3436 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3437 continue;
3438 /* Shared global vars are just accessed directly. */
3439 if (is_global_var (new_var))
3440 break;
3441 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3442 needs to be delayed until after fixup_child_record_type so
3443 that we get the correct type during the dereference. */
3444 by_ref = use_pointer_for_field (var, ctx);
3445 x = build_receiver_ref (var, by_ref, ctx);
3446 SET_DECL_VALUE_EXPR (new_var, x);
3447 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3449 /* ??? If VAR is not passed by reference, and the variable
3450 hasn't been initialized yet, then we'll get a warning for
3451 the store into the omp_data_s structure. Ideally, we'd be
3452 able to notice this and not store anything at all, but
3453 we're generating code too early. Suppress the warning. */
3454 if (!by_ref)
3455 TREE_NO_WARNING (var) = 1;
3456 break;
3458 case OMP_CLAUSE_LASTPRIVATE:
3459 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3460 break;
3461 /* FALLTHRU */
3463 case OMP_CLAUSE_PRIVATE:
3464 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3465 x = build_outer_var_ref (var, ctx);
3466 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3468 if (is_task_ctx (ctx))
3469 x = build_receiver_ref (var, false, ctx);
3470 else
3471 x = build_outer_var_ref (var, ctx);
3473 else
3474 x = NULL;
3475 do_private:
3476 tree nx;
3477 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3478 if (is_simd)
3480 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3481 if ((TREE_ADDRESSABLE (new_var) || nx || y
3482 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3483 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3484 idx, lane, ivar, lvar))
3486 if (nx)
3487 x = lang_hooks.decls.omp_clause_default_ctor
3488 (c, unshare_expr (ivar), x);
3489 if (nx && x)
3490 gimplify_and_add (x, &llist[0]);
3491 if (y)
3493 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3494 if (y)
3496 gimple_seq tseq = NULL;
3498 dtor = y;
3499 gimplify_stmt (&dtor, &tseq);
3500 gimple_seq_add_seq (&llist[1], tseq);
3503 break;
3506 if (nx)
3507 gimplify_and_add (nx, ilist);
3508 /* FALLTHRU */
3510 do_dtor:
3511 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3512 if (x)
3514 gimple_seq tseq = NULL;
3516 dtor = x;
3517 gimplify_stmt (&dtor, &tseq);
3518 gimple_seq_add_seq (dlist, tseq);
3520 break;
3522 case OMP_CLAUSE_LINEAR:
3523 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3524 goto do_firstprivate;
3525 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3526 x = NULL;
3527 else
3528 x = build_outer_var_ref (var, ctx);
3529 goto do_private;
3531 case OMP_CLAUSE_FIRSTPRIVATE:
3532 if (is_task_ctx (ctx))
3534 if (is_reference (var) || is_variable_sized (var))
3535 goto do_dtor;
3536 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3537 ctx))
3538 || use_pointer_for_field (var, NULL))
3540 x = build_receiver_ref (var, false, ctx);
3541 SET_DECL_VALUE_EXPR (new_var, x);
3542 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3543 goto do_dtor;
3546 do_firstprivate:
3547 x = build_outer_var_ref (var, ctx);
3548 if (is_simd)
3550 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3551 && gimple_omp_for_combined_into_p (ctx->stmt))
3553 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3554 tree stept = TREE_TYPE (t);
3555 tree ct = find_omp_clause (clauses,
3556 OMP_CLAUSE__LOOPTEMP_);
3557 gcc_assert (ct);
3558 tree l = OMP_CLAUSE_DECL (ct);
3559 tree n1 = fd->loop.n1;
3560 tree step = fd->loop.step;
3561 tree itype = TREE_TYPE (l);
3562 if (POINTER_TYPE_P (itype))
3563 itype = signed_type_for (itype);
3564 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3565 if (TYPE_UNSIGNED (itype)
3566 && fd->loop.cond_code == GT_EXPR)
3567 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3568 fold_build1 (NEGATE_EXPR, itype, l),
3569 fold_build1 (NEGATE_EXPR,
3570 itype, step));
3571 else
3572 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3573 t = fold_build2 (MULT_EXPR, stept,
3574 fold_convert (stept, l), t);
3576 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3578 x = lang_hooks.decls.omp_clause_linear_ctor
3579 (c, new_var, x, t);
3580 gimplify_and_add (x, ilist);
3581 goto do_dtor;
3584 if (POINTER_TYPE_P (TREE_TYPE (x)))
3585 x = fold_build2 (POINTER_PLUS_EXPR,
3586 TREE_TYPE (x), x, t);
3587 else
3588 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3591 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3592 || TREE_ADDRESSABLE (new_var))
3593 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3594 idx, lane, ivar, lvar))
3596 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3598 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3599 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3600 gimplify_and_add (x, ilist);
3601 gimple_stmt_iterator gsi
3602 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3603 gimple g
3604 = gimple_build_assign (unshare_expr (lvar), iv);
3605 gsi_insert_before_without_update (&gsi, g,
3606 GSI_SAME_STMT);
3607 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3608 enum tree_code code = PLUS_EXPR;
3609 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3610 code = POINTER_PLUS_EXPR;
3611 g = gimple_build_assign_with_ops (code, iv, iv, t);
3612 gsi_insert_before_without_update (&gsi, g,
3613 GSI_SAME_STMT);
3614 break;
3616 x = lang_hooks.decls.omp_clause_copy_ctor
3617 (c, unshare_expr (ivar), x);
3618 gimplify_and_add (x, &llist[0]);
3619 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3620 if (x)
3622 gimple_seq tseq = NULL;
3624 dtor = x;
3625 gimplify_stmt (&dtor, &tseq);
3626 gimple_seq_add_seq (&llist[1], tseq);
3628 break;
3631 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3632 gimplify_and_add (x, ilist);
3633 goto do_dtor;
3635 case OMP_CLAUSE__LOOPTEMP_:
3636 gcc_assert (is_parallel_ctx (ctx));
3637 x = build_outer_var_ref (var, ctx);
3638 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3639 gimplify_and_add (x, ilist);
3640 break;
3642 case OMP_CLAUSE_COPYIN:
3643 by_ref = use_pointer_for_field (var, NULL);
3644 x = build_receiver_ref (var, by_ref, ctx);
3645 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3646 append_to_statement_list (x, &copyin_seq);
3647 copyin_by_ref |= by_ref;
3648 break;
3650 case OMP_CLAUSE_REDUCTION:
3651 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3653 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3654 gimple tseq;
3655 x = build_outer_var_ref (var, ctx);
3657 if (is_reference (var)
3658 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3659 TREE_TYPE (x)))
3660 x = build_fold_addr_expr_loc (clause_loc, x);
3661 SET_DECL_VALUE_EXPR (placeholder, x);
3662 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3663 tree new_vard = new_var;
3664 if (is_reference (var))
3666 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3667 new_vard = TREE_OPERAND (new_var, 0);
3668 gcc_assert (DECL_P (new_vard));
3670 if (is_simd
3671 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3672 idx, lane, ivar, lvar))
3674 if (new_vard == new_var)
3676 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3677 SET_DECL_VALUE_EXPR (new_var, ivar);
3679 else
3681 SET_DECL_VALUE_EXPR (new_vard,
3682 build_fold_addr_expr (ivar));
3683 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3685 x = lang_hooks.decls.omp_clause_default_ctor
3686 (c, unshare_expr (ivar),
3687 build_outer_var_ref (var, ctx));
3688 if (x)
3689 gimplify_and_add (x, &llist[0]);
3690 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3692 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3693 lower_omp (&tseq, ctx);
3694 gimple_seq_add_seq (&llist[0], tseq);
3696 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3697 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3698 lower_omp (&tseq, ctx);
3699 gimple_seq_add_seq (&llist[1], tseq);
3700 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3701 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3702 if (new_vard == new_var)
3703 SET_DECL_VALUE_EXPR (new_var, lvar);
3704 else
3705 SET_DECL_VALUE_EXPR (new_vard,
3706 build_fold_addr_expr (lvar));
3707 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3708 if (x)
3710 tseq = NULL;
3711 dtor = x;
3712 gimplify_stmt (&dtor, &tseq);
3713 gimple_seq_add_seq (&llist[1], tseq);
3715 break;
3717 /* If this is a reference to a constant-size reduction var
3718 with a placeholder, we haven't emitted the initializer
3719 for it because it is undesirable if SIMD arrays are used.
3720 But if they aren't used, we need to emit the deferred
3721 initialization now. */
3722 else if (is_reference (var) && is_simd)
3723 handle_simd_reference (clause_loc, new_vard, ilist);
3724 x = lang_hooks.decls.omp_clause_default_ctor
3725 (c, unshare_expr (new_var),
3726 build_outer_var_ref (var, ctx));
3727 if (x)
3728 gimplify_and_add (x, ilist);
3729 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3731 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3732 lower_omp (&tseq, ctx);
3733 gimple_seq_add_seq (ilist, tseq);
3735 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3736 if (is_simd)
3738 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3739 lower_omp (&tseq, ctx);
3740 gimple_seq_add_seq (dlist, tseq);
3741 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3743 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3744 goto do_dtor;
3746 else
3748 x = omp_reduction_init (c, TREE_TYPE (new_var));
3749 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3750 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3752 /* reduction(-:var) sums up the partial results, so it
3753 acts identically to reduction(+:var). */
3754 if (code == MINUS_EXPR)
3755 code = PLUS_EXPR;
3757 tree new_vard = new_var;
3758 if (is_simd && is_reference (var))
3760 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3761 new_vard = TREE_OPERAND (new_var, 0);
3762 gcc_assert (DECL_P (new_vard));
3764 if (is_simd
3765 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3766 idx, lane, ivar, lvar))
3768 tree ref = build_outer_var_ref (var, ctx);
3770 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3772 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3773 ref = build_outer_var_ref (var, ctx);
3774 gimplify_assign (ref, x, &llist[1]);
3776 if (new_vard != new_var)
3778 SET_DECL_VALUE_EXPR (new_vard,
3779 build_fold_addr_expr (lvar));
3780 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3783 else
3785 if (is_reference (var) && is_simd)
3786 handle_simd_reference (clause_loc, new_vard, ilist);
3787 gimplify_assign (new_var, x, ilist);
3788 if (is_simd)
3790 tree ref = build_outer_var_ref (var, ctx);
3792 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3793 ref = build_outer_var_ref (var, ctx);
3794 gimplify_assign (ref, x, dlist);
3798 break;
3800 default:
3801 gcc_unreachable ();
3806 if (lane)
3808 tree uid = create_tmp_var (ptr_type_node, "simduid");
3809 /* We don't want uninit warnings on simduid; it is always uninitialized,
3810 but we use it only for its DECL_UID, never for its value. */
3811 TREE_NO_WARNING (uid) = 1;
3812 gimple g
3813 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3814 gimple_call_set_lhs (g, lane);
3815 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3816 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3817 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3818 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3819 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3820 gimple_omp_for_set_clauses (ctx->stmt, c);
3821 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3822 build_int_cst (unsigned_type_node, 0),
3823 NULL_TREE);
3824 gimple_seq_add_stmt (ilist, g);
3825 for (int i = 0; i < 2; i++)
3826 if (llist[i])
3828 tree vf = create_tmp_var (unsigned_type_node, NULL);
3829 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3830 gimple_call_set_lhs (g, vf);
3831 gimple_seq *seq = i == 0 ? ilist : dlist;
3832 gimple_seq_add_stmt (seq, g);
3833 tree t = build_int_cst (unsigned_type_node, 0);
3834 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3835 gimple_seq_add_stmt (seq, g);
3836 tree body = create_artificial_label (UNKNOWN_LOCATION);
3837 tree header = create_artificial_label (UNKNOWN_LOCATION);
3838 tree end = create_artificial_label (UNKNOWN_LOCATION);
3839 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3840 gimple_seq_add_stmt (seq, gimple_build_label (body));
3841 gimple_seq_add_seq (seq, llist[i]);
3842 t = build_int_cst (unsigned_type_node, 1);
3843 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3844 gimple_seq_add_stmt (seq, g);
3845 gimple_seq_add_stmt (seq, gimple_build_label (header));
3846 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3847 gimple_seq_add_stmt (seq, g);
3848 gimple_seq_add_stmt (seq, gimple_build_label (end));
3852 /* The copyin sequence is not to be executed by the main thread, since
3853 that would result in self-copies. Perhaps not visible to scalars,
3854 but it certainly is to C++ operator=. */
3855 if (copyin_seq)
3857 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3859 x = build2 (NE_EXPR, boolean_type_node, x,
3860 build_int_cst (TREE_TYPE (x), 0));
3861 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3862 gimplify_and_add (x, ilist);
3865 /* If any copyin variable is passed by reference, we must ensure the
3866 master thread doesn't modify it before it is copied over in all
3867 threads. Similarly for variables in both firstprivate and
3868 lastprivate clauses we need to ensure the lastprivate copying
3869 happens after firstprivate copying in all threads. And similarly
3870 for UDRs if initializer expression refers to omp_orig. */
3871 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3873 /* Don't add any barrier for #pragma omp simd or
3874 #pragma omp distribute. */
3875 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3876 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
3877 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3880 /* If max_vf is non-zero, then we can use only a vectorization factor
3881 up to the max_vf we chose. So stick it into the safelen clause. */
3882 if (max_vf)
3884 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3885 OMP_CLAUSE_SAFELEN);
3886 if (c == NULL_TREE
3887 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
3888 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3889 max_vf) == 1))
3891 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3892 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3893 max_vf);
3894 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3895 gimple_omp_for_set_clauses (ctx->stmt, c);
3901 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3902 both parallel and workshare constructs. PREDICATE may be NULL if it's
3903 always true. */
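/* For example, for a lastprivate(x) clause this emits, roughly,
     if (PREDICATE)
       {
	 <any lastprivate stmt sequences>;
	 x = x_priv;
       }
   where PREDICATE (when non-NULL) tests that this thread executed the
   sequentially last iteration, and x_priv here stands for the
   privatized copy. */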
3905 static void
3906 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3907 omp_context *ctx)
3909 tree x, c, label = NULL, orig_clauses = clauses;
3910 bool par_clauses = false;
3911 tree simduid = NULL, lastlane = NULL;
3913 /* Early exit if there are no lastprivate or linear clauses. */
3914 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3915 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3916 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3917 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3918 break;
3919 if (clauses == NULL)
3921 /* If this was a workshare clause, see if it had been combined
3922 with its parallel. In that case, look for the clauses on the
3923 parallel statement itself. */
3924 if (is_parallel_ctx (ctx))
3925 return;
3927 ctx = ctx->outer;
3928 if (ctx == NULL || !is_parallel_ctx (ctx))
3929 return;
3931 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3932 OMP_CLAUSE_LASTPRIVATE);
3933 if (clauses == NULL)
3934 return;
3935 par_clauses = true;
3938 if (predicate)
3940 gimple stmt;
3941 tree label_true, arm1, arm2;
3943 label = create_artificial_label (UNKNOWN_LOCATION);
3944 label_true = create_artificial_label (UNKNOWN_LOCATION);
3945 arm1 = TREE_OPERAND (predicate, 0);
3946 arm2 = TREE_OPERAND (predicate, 1);
3947 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3948 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3949 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3950 label_true, label);
3951 gimple_seq_add_stmt (stmt_list, stmt);
3952 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3955 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3956 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3958 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3959 if (simduid)
3960 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3963 for (c = clauses; c ;)
3965 tree var, new_var;
3966 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3968 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3969 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3970 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3972 var = OMP_CLAUSE_DECL (c);
3973 new_var = lookup_decl (var, ctx);
3975 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3977 tree val = DECL_VALUE_EXPR (new_var);
3978 if (TREE_CODE (val) == ARRAY_REF
3979 && VAR_P (TREE_OPERAND (val, 0))
3980 && lookup_attribute ("omp simd array",
3981 DECL_ATTRIBUTES (TREE_OPERAND (val,
3982 0))))
3984 if (lastlane == NULL)
3986 lastlane = create_tmp_var (unsigned_type_node, NULL);
3987 gimple g
3988 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3989 2, simduid,
3990 TREE_OPERAND (val, 1));
3991 gimple_call_set_lhs (g, lastlane);
3992 gimple_seq_add_stmt (stmt_list, g);
3994 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3995 TREE_OPERAND (val, 0), lastlane,
3996 NULL_TREE, NULL_TREE);
4000 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4001 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
4003 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
4004 gimple_seq_add_seq (stmt_list,
4005 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
4006 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
4008 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4009 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
4011 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
4012 gimple_seq_add_seq (stmt_list,
4013 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
4014 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
4017 x = build_outer_var_ref (var, ctx);
4018 if (is_reference (var))
4019 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4020 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
4021 gimplify_and_add (x, stmt_list);
4023 c = OMP_CLAUSE_CHAIN (c);
4024 if (c == NULL && !par_clauses)
4026 /* If this was a workshare clause, see if it had been combined
4027 with its parallel. In that case, continue looking for the
4028 clauses also on the parallel statement itself. */
4029 if (is_parallel_ctx (ctx))
4030 break;
4032 ctx = ctx->outer;
4033 if (ctx == NULL || !is_parallel_ctx (ctx))
4034 break;
4036 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4037 OMP_CLAUSE_LASTPRIVATE);
4038 par_clauses = true;
4042 if (label)
4043 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
4047 /* Generate code to implement the REDUCTION clauses. */
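/* For example, with a single reduction(+:s) clause the merge below is
   emitted roughly as the atomic update
     #pragma omp atomic
     s = s + s_priv;
   (s_priv standing for the privatized copy), whereas several clauses,
   or array/UDR reductions, are instead protected by a
   GOMP_atomic_start ()/GOMP_atomic_end () pair. */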
4049 static void
4050 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
4052 gimple_seq sub_seq = NULL;
4053 gimple stmt;
4054 tree x, c;
4055 int count = 0;
4057 /* SIMD reductions are handled in lower_rec_input_clauses. */
4058 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4059 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4060 return;
4062 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4063 update in that case, otherwise use a lock. */
4064 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
4065 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
4067 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4069 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4070 count = -1;
4071 break;
4073 count++;
4076 if (count == 0)
4077 return;
4079 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4081 tree var, ref, new_var;
4082 enum tree_code code;
4083 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4085 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
4086 continue;
4088 var = OMP_CLAUSE_DECL (c);
4089 new_var = lookup_decl (var, ctx);
4090 if (is_reference (var))
4091 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4092 ref = build_outer_var_ref (var, ctx);
4093 code = OMP_CLAUSE_REDUCTION_CODE (c);
4095 /* reduction(-:var) sums up the partial results, so it acts
4096 identically to reduction(+:var). */
4097 if (code == MINUS_EXPR)
4098 code = PLUS_EXPR;
4100 if (count == 1)
4102 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
4104 addr = save_expr (addr);
4105 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
4106 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
4107 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
4108 gimplify_and_add (x, stmt_seqp);
4109 return;
4112 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4114 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4116 if (is_reference (var)
4117 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4118 TREE_TYPE (ref)))
4119 ref = build_fold_addr_expr_loc (clause_loc, ref);
4120 SET_DECL_VALUE_EXPR (placeholder, ref);
4121 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4122 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
4123 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
4124 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4125 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
4127 else
4129 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4130 ref = build_outer_var_ref (var, ctx);
4131 gimplify_assign (ref, x, &sub_seq);
4135 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4136 0);
4137 gimple_seq_add_stmt (stmt_seqp, stmt);
4139 gimple_seq_add_seq (stmt_seqp, sub_seq);
4141 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4142 0);
4143 gimple_seq_add_stmt (stmt_seqp, stmt);
4147 /* Generate code to implement the COPYPRIVATE clauses. */
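/* Roughly, for a copyprivate(x) clause on a single construct this
   stores the value (or, if BY_REF, the address) in SLIST and loads it
   back in RLIST, e.g.
     .omp_copy_o.x = &x;	<- executing thread
     x = *.omp_copy_o.x;	<- the other threads
   with the structure and field names here purely illustrative. */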
4149 static void
4150 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4151 omp_context *ctx)
4153 tree c;
4155 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4157 tree var, new_var, ref, x;
4158 bool by_ref;
4159 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4161 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4162 continue;
4164 var = OMP_CLAUSE_DECL (c);
4165 by_ref = use_pointer_for_field (var, NULL);
4167 ref = build_sender_ref (var, ctx);
4168 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4169 if (by_ref)
4171 x = build_fold_addr_expr_loc (clause_loc, new_var);
4172 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4174 gimplify_assign (ref, x, slist);
4176 ref = build_receiver_ref (var, false, ctx);
4177 if (by_ref)
4179 ref = fold_convert_loc (clause_loc,
4180 build_pointer_type (TREE_TYPE (new_var)),
4181 ref);
4182 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4184 if (is_reference (var))
4186 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4187 ref = build_simple_mem_ref_loc (clause_loc, ref);
4188 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4190 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4191 gimplify_and_add (x, rlist);
4196 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE
4197 and REDUCTION clauses from the sender (aka parent) side. */
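/* E.g. for firstprivate(a) sent by value this emits, schematically,
     .omp_data_o.a = a;
   into ILIST, while a reduction result received back by value emits
     a = .omp_data_o.a;
   into OLIST. */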
4199 static void
4200 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4201 omp_context *ctx)
4203 tree c;
4205 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4207 tree val, ref, x, var;
4208 bool by_ref, do_in = false, do_out = false;
4209 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4211 switch (OMP_CLAUSE_CODE (c))
4213 case OMP_CLAUSE_PRIVATE:
4214 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4215 break;
4216 continue;
4217 case OMP_CLAUSE_FIRSTPRIVATE:
4218 case OMP_CLAUSE_COPYIN:
4219 case OMP_CLAUSE_LASTPRIVATE:
4220 case OMP_CLAUSE_REDUCTION:
4221 case OMP_CLAUSE__LOOPTEMP_:
4222 break;
4223 default:
4224 continue;
4227 val = OMP_CLAUSE_DECL (c);
4228 var = lookup_decl_in_outer_ctx (val, ctx);
4230 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4231 && is_global_var (var))
4232 continue;
4233 if (is_variable_sized (val))
4234 continue;
4235 by_ref = use_pointer_for_field (val, NULL);
4237 switch (OMP_CLAUSE_CODE (c))
4239 case OMP_CLAUSE_PRIVATE:
4240 case OMP_CLAUSE_FIRSTPRIVATE:
4241 case OMP_CLAUSE_COPYIN:
4242 case OMP_CLAUSE__LOOPTEMP_:
4243 do_in = true;
4244 break;
4246 case OMP_CLAUSE_LASTPRIVATE:
4247 if (by_ref || is_reference (val))
4249 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4250 continue;
4251 do_in = true;
4253 else
4255 do_out = true;
4256 if (lang_hooks.decls.omp_private_outer_ref (val))
4257 do_in = true;
4259 break;
4261 case OMP_CLAUSE_REDUCTION:
4262 do_in = true;
4263 do_out = !(by_ref || is_reference (val));
4264 break;
4266 default:
4267 gcc_unreachable ();
4270 if (do_in)
4272 ref = build_sender_ref (val, ctx);
4273 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4274 gimplify_assign (ref, x, ilist);
4275 if (is_task_ctx (ctx))
4276 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4279 if (do_out)
4281 ref = build_sender_ref (val, ctx);
4282 gimplify_assign (var, ref, olist);
4287 /* Generate code to implement SHARED from the sender (aka parent)
4288 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4289 list things that got automatically shared. */
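/* Schematically, a shared variable v whose field must be a pointer is
   sent as
     .omp_data_o.v = &v;
   while a register-sized v is copied in by value and, unless it is
   read-only, copied back out after the region. */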
4291 static void
4292 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4294 tree var, ovar, nvar, f, x, record_type;
4296 if (ctx->record_type == NULL)
4297 return;
4299 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4300 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4302 ovar = DECL_ABSTRACT_ORIGIN (f);
4303 nvar = maybe_lookup_decl (ovar, ctx);
4304 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4305 continue;
4307 /* If CTX is a nested parallel directive, find the immediately
4308 enclosing parallel or workshare construct that contains a
4309 mapping for OVAR. */
4310 var = lookup_decl_in_outer_ctx (ovar, ctx);
4312 if (use_pointer_for_field (ovar, ctx))
4314 x = build_sender_ref (ovar, ctx);
4315 var = build_fold_addr_expr (var);
4316 gimplify_assign (x, var, ilist);
4318 else
4320 x = build_sender_ref (ovar, ctx);
4321 gimplify_assign (x, var, ilist);
4323 if (!TREE_READONLY (var)
4324 /* We don't need to receive a new reference to a result
4325 or parm decl. In fact we may not store to it as we will
4326 invalidate any pending RSO (return slot optimization) and
4327 generate wrong gimple during inlining. */
4328 && !((TREE_CODE (var) == RESULT_DECL
4329 || TREE_CODE (var) == PARM_DECL)
4330 && DECL_BY_REFERENCE (var)))
4332 x = build_sender_ref (ovar, ctx);
4333 gimplify_assign (var, x, olist);
4340 /* A convenience function to build an empty GIMPLE_COND with just the
4341 condition. */
4343 static gimple
4344 gimple_build_cond_empty (tree cond)
4346 enum tree_code pred_code;
4347 tree lhs, rhs;
4349 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4350 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4354 /* Build the function calls to GOMP_parallel etc. to actually
4355 generate the parallel operation. REGION is the parallel region
4356 being expanded. BB is the block into which to insert the code.
4357 WS_ARGS will be set if this is a call to a combined
4358 parallel+workshare construct; it contains the list of additional
4359 arguments needed by the workshare construct. */
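/* In the simplest case the call built here is, schematically,
     GOMP_parallel (child_fn, &.omp_data_o, num_threads, flags);
   combined parallel+workshare regions instead use one of the
   GOMP_parallel_loop_* or GOMP_parallel_sections entry points, with
   WS_ARGS spliced into the argument list before FLAGS. */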
4361 static void
4362 expand_parallel_call (struct omp_region *region, basic_block bb,
4363 gimple entry_stmt, vec<tree, va_gc> *ws_args)
4365 tree t, t1, t2, val, cond, c, clauses, flags;
4366 gimple_stmt_iterator gsi;
4367 gimple stmt;
4368 enum built_in_function start_ix;
4369 int start_ix2;
4370 location_t clause_loc;
4371 vec<tree, va_gc> *args;
4373 clauses = gimple_omp_parallel_clauses (entry_stmt);
4375 /* Determine what flavor of GOMP_parallel we will be
4376 emitting. */
4377 start_ix = BUILT_IN_GOMP_PARALLEL;
4378 if (is_combined_parallel (region))
4380 switch (region->inner->type)
4382 case GIMPLE_OMP_FOR:
4383 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4384 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4385 + (region->inner->sched_kind
4386 == OMP_CLAUSE_SCHEDULE_RUNTIME
4387 ? 3 : region->inner->sched_kind));
4388 start_ix = (enum built_in_function)start_ix2;
4389 break;
4390 case GIMPLE_OMP_SECTIONS:
4391 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4392 break;
4393 default:
4394 gcc_unreachable ();
4398 /* By default, the value of NUM_THREADS is zero (selected at run time)
4399 and there is no conditional. */
4400 cond = NULL_TREE;
4401 val = build_int_cst (unsigned_type_node, 0);
4402 flags = build_int_cst (unsigned_type_node, 0);
4404 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4405 if (c)
4406 cond = OMP_CLAUSE_IF_EXPR (c);
4408 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4409 if (c)
4411 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4412 clause_loc = OMP_CLAUSE_LOCATION (c);
4414 else
4415 clause_loc = gimple_location (entry_stmt);
4417 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4418 if (c)
4419 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4421 /* Ensure 'val' is of the correct type. */
4422 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4424 /* If we found the clause 'if (cond)', build either
4425 (cond != 0) or (cond ? val : 1u). */
4426 if (cond)
4428 cond = gimple_boolify (cond);
4430 if (integer_zerop (val))
4431 val = fold_build2_loc (clause_loc,
4432 EQ_EXPR, unsigned_type_node, cond,
4433 build_int_cst (TREE_TYPE (cond), 0));
4434 else
4436 basic_block cond_bb, then_bb, else_bb;
4437 edge e, e_then, e_else;
4438 tree tmp_then, tmp_else, tmp_join, tmp_var;
4440 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4441 if (gimple_in_ssa_p (cfun))
4443 tmp_then = make_ssa_name (tmp_var, NULL);
4444 tmp_else = make_ssa_name (tmp_var, NULL);
4445 tmp_join = make_ssa_name (tmp_var, NULL);
4447 else
4449 tmp_then = tmp_var;
4450 tmp_else = tmp_var;
4451 tmp_join = tmp_var;
4454 e = split_block (bb, NULL);
4455 cond_bb = e->src;
4456 bb = e->dest;
4457 remove_edge (e);
4459 then_bb = create_empty_bb (cond_bb);
4460 else_bb = create_empty_bb (then_bb);
4461 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4462 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4464 stmt = gimple_build_cond_empty (cond);
4465 gsi = gsi_start_bb (cond_bb);
4466 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4468 gsi = gsi_start_bb (then_bb);
4469 stmt = gimple_build_assign (tmp_then, val);
4470 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4472 gsi = gsi_start_bb (else_bb);
4473 stmt = gimple_build_assign
4474 (tmp_else, build_int_cst (unsigned_type_node, 1));
4475 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4477 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4478 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4479 add_bb_to_loop (then_bb, cond_bb->loop_father);
4480 add_bb_to_loop (else_bb, cond_bb->loop_father);
4481 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4482 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4484 if (gimple_in_ssa_p (cfun))
4486 gimple_phi phi = create_phi_node (tmp_join, bb);
4487 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4488 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4491 val = tmp_join;
4494 gsi = gsi_start_bb (bb);
4495 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4496 false, GSI_CONTINUE_LINKING);
4499 gsi = gsi_last_bb (bb);
4500 t = gimple_omp_parallel_data_arg (entry_stmt);
4501 if (t == NULL)
4502 t1 = null_pointer_node;
4503 else
4504 t1 = build_fold_addr_expr (t);
4505 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4507 vec_alloc (args, 4 + vec_safe_length (ws_args));
4508 args->quick_push (t2);
4509 args->quick_push (t1);
4510 args->quick_push (val);
4511 if (ws_args)
4512 args->splice (*ws_args);
4513 args->quick_push (flags);
4515 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4516 builtin_decl_explicit (start_ix), args);
4518 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4519 false, GSI_CONTINUE_LINKING);
4522 /* Insert a function call whose name is FUNC_NAME with the information from
4523 ENTRY_STMT into the basic_block BB. */
4525 static void
4526 expand_cilk_for_call (basic_block bb, gimple entry_stmt,
4527 vec <tree, va_gc> *ws_args)
4529 tree t, t1, t2;
4530 gimple_stmt_iterator gsi;
4531 vec <tree, va_gc> *args;
4533 gcc_assert (vec_safe_length (ws_args) == 2);
4534 tree func_name = (*ws_args)[0];
4535 tree grain = (*ws_args)[1];
4537 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
4538 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
4539 gcc_assert (count != NULL_TREE);
4540 count = OMP_CLAUSE_OPERAND (count, 0);
4542 gsi = gsi_last_bb (bb);
4543 t = gimple_omp_parallel_data_arg (entry_stmt);
4544 if (t == NULL)
4545 t1 = null_pointer_node;
4546 else
4547 t1 = build_fold_addr_expr (t);
4548 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4550 vec_alloc (args, 4);
4551 args->quick_push (t2);
4552 args->quick_push (t1);
4553 args->quick_push (count);
4554 args->quick_push (grain);
4555 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
4557 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
4558 GSI_CONTINUE_LINKING);
4561 /* Build the function call to GOMP_task to actually
4562 generate the task operation. BB is the block into which to insert the code. */
4564 static void
4565 expand_task_call (basic_block bb, gimple entry_stmt)
4567 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4568 gimple_stmt_iterator gsi;
4569 location_t loc = gimple_location (entry_stmt);
4571 clauses = gimple_omp_task_clauses (entry_stmt);
4573 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4574 if (c)
4575 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4576 else
4577 cond = boolean_true_node;
4579 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4580 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4581 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4582 flags = build_int_cst (unsigned_type_node,
4583 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
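  /* So, in the flags word passed to GOMP_task, bit 0 encodes untied,
     bit 2 mergeable and bit 3 the presence of depend clauses; bit 1
     (value 2) is added below when a final clause evaluates to true. */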
4585 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4586 if (c)
4588 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4589 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4590 build_int_cst (unsigned_type_node, 2),
4591 build_int_cst (unsigned_type_node, 0));
4592 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4594 if (depend)
4595 depend = OMP_CLAUSE_DECL (depend);
4596 else
4597 depend = build_int_cst (ptr_type_node, 0);
4599 gsi = gsi_last_bb (bb);
4600 t = gimple_omp_task_data_arg (entry_stmt);
4601 if (t == NULL)
4602 t2 = null_pointer_node;
4603 else
4604 t2 = build_fold_addr_expr_loc (loc, t);
4605 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4606 t = gimple_omp_task_copy_fn (entry_stmt);
4607 if (t == NULL)
4608 t3 = null_pointer_node;
4609 else
4610 t3 = build_fold_addr_expr_loc (loc, t);
4612 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4613 8, t1, t2, t3,
4614 gimple_omp_task_arg_size (entry_stmt),
4615 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4616 depend);
4618 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4619 false, GSI_CONTINUE_LINKING);
4623 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4624 catch handler and return it. This prevents programs from violating the
4625 structured block semantics with throws. */
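/* I.e. the result is, schematically,
     try { BODY } catch { MUST_NOT_THROW (terminate-or-trap ()); }
   so an exception escaping BODY invokes the cleanup action (or traps)
   rather than unwinding out of the region. */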
4627 static gimple_seq
4628 maybe_catch_exception (gimple_seq body)
4630 gimple g;
4631 tree decl;
4633 if (!flag_exceptions)
4634 return body;
4636 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4637 decl = lang_hooks.eh_protect_cleanup_actions ();
4638 else
4639 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4641 g = gimple_build_eh_must_not_throw (decl);
4642 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4643 GIMPLE_TRY_CATCH);
4645 return gimple_seq_alloc_with_stmt (g);
4648 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
4650 static tree
4651 vec2chain (vec<tree, va_gc> *v)
4653 tree chain = NULL_TREE, t;
4654 unsigned ix;
4656 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4658 DECL_CHAIN (t) = chain;
4659 chain = t;
4662 return chain;
4666 /* Remove barriers in REGION->EXIT's block. Note that this is only
4667 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4668 is an implicit barrier, any barrier that a workshare inside the
4669 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
4670 can now be removed. */
4672 static void
4673 remove_exit_barrier (struct omp_region *region)
4675 gimple_stmt_iterator gsi;
4676 basic_block exit_bb;
4677 edge_iterator ei;
4678 edge e;
4679 gimple stmt;
4680 int any_addressable_vars = -1;
4682 exit_bb = region->exit;
4684 /* If the parallel region doesn't return, we don't have a REGION->EXIT
4685 block at all. */
4686 if (! exit_bb)
4687 return;
4689 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4690 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4691 statements that can appear in between are extremely limited -- no
4692 memory operations at all. Here, we allow nothing at all, so the
4693 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4694 gsi = gsi_last_bb (exit_bb);
4695 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4696 gsi_prev (&gsi);
4697 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4698 return;
4700 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4702 gsi = gsi_last_bb (e->src);
4703 if (gsi_end_p (gsi))
4704 continue;
4705 stmt = gsi_stmt (gsi);
4706 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4707 && !gimple_omp_return_nowait_p (stmt))
4709 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4710 in many cases. If there could be tasks queued, the barrier
4711 might be needed to let the tasks run before some local
4712 variable of the parallel that the task uses as shared
4713 runs out of scope. The task can be spawned either
4714 from within the current function (this would be easy to check)
4715 or from some function it calls that gets passed the address
4716 of such a variable. */
4717 if (any_addressable_vars < 0)
4719 gimple parallel_stmt = last_stmt (region->entry);
4720 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4721 tree local_decls, block, decl;
4722 unsigned ix;
4724 any_addressable_vars = 0;
4725 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4726 if (TREE_ADDRESSABLE (decl))
4728 any_addressable_vars = 1;
4729 break;
4731 for (block = gimple_block (stmt);
4732 !any_addressable_vars
4733 && block
4734 && TREE_CODE (block) == BLOCK;
4735 block = BLOCK_SUPERCONTEXT (block))
4737 for (local_decls = BLOCK_VARS (block);
4738 local_decls;
4739 local_decls = DECL_CHAIN (local_decls))
4740 if (TREE_ADDRESSABLE (local_decls))
4742 any_addressable_vars = 1;
4743 break;
4745 if (block == gimple_block (parallel_stmt))
4746 break;
4749 if (!any_addressable_vars)
4750 gimple_omp_return_set_nowait (stmt);
4755 static void
4756 remove_exit_barriers (struct omp_region *region)
4758 if (region->type == GIMPLE_OMP_PARALLEL)
4759 remove_exit_barrier (region);
4761 if (region->inner)
4763 region = region->inner;
4764 remove_exit_barriers (region);
4765 while (region->next)
4767 region = region->next;
4768 remove_exit_barriers (region);
4773 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4774 calls. These can't be declared as const functions, but
4775 within one parallel body they are constant, so they can be
4776 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4777 which are declared const. Similarly for a task body, except
4778 that in an untied task omp_get_thread_num () can change at any task
4779 scheduling point. */
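/* E.g. inside a parallel body a call such as
     n = omp_get_num_threads ();
   is redirected to the const built-in
     n = __builtin_omp_get_num_threads ();
   so later passes can CSE repeated calls within the body. */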
4781 static void
4782 optimize_omp_library_calls (gimple entry_stmt)
4784 basic_block bb;
4785 gimple_stmt_iterator gsi;
4786 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4787 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4788 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4789 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4790 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4791 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4792 OMP_CLAUSE_UNTIED) != NULL);
4794 FOR_EACH_BB_FN (bb, cfun)
4795 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4797 gimple call = gsi_stmt (gsi);
4798 tree decl;
4800 if (is_gimple_call (call)
4801 && (decl = gimple_call_fndecl (call))
4802 && DECL_EXTERNAL (decl)
4803 && TREE_PUBLIC (decl)
4804 && DECL_INITIAL (decl) == NULL)
4806 tree built_in;
4808 if (DECL_NAME (decl) == thr_num_id)
4810 /* In #pragma omp task untied, omp_get_thread_num () can change
4811 during the execution of the task region. */
4812 if (untied_task)
4813 continue;
4814 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4816 else if (DECL_NAME (decl) == num_thr_id)
4817 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4818 else
4819 continue;
4821 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4822 || gimple_call_num_args (call) != 0)
4823 continue;
4825 if (flag_exceptions && !TREE_NOTHROW (decl))
4826 continue;
4828 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4829 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4830 TREE_TYPE (TREE_TYPE (built_in))))
4831 continue;
4833 gimple_call_set_fndecl (call, built_in);
4838 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4839 regimplified. */
4841 static tree
4842 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4844 tree t = *tp;
4846 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4847 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4848 return t;
4850 if (TREE_CODE (t) == ADDR_EXPR)
4851 recompute_tree_invariant_for_addr_expr (t);
4853 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4854 return NULL_TREE;
4857 /* Prepend TO = FROM assignment before *GSI_P. */
4859 static void
4860 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4862 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4863 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4864 true, GSI_SAME_STMT);
4865 gimple stmt = gimple_build_assign (to, from);
4866 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4867 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4868 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4870 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4871 gimple_regimplify_operands (stmt, &gsi);
4875 /* Expand the OpenMP parallel or task directive starting at REGION. */
4877 static void
4878 expand_omp_taskreg (struct omp_region *region)
4880 basic_block entry_bb, exit_bb, new_bb;
4881 struct function *child_cfun;
4882 tree child_fn, block, t;
4883 gimple_stmt_iterator gsi;
4884 gimple entry_stmt, stmt;
4885 edge e;
4886 vec<tree, va_gc> *ws_args;
4888 entry_stmt = last_stmt (region->entry);
4889 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4890 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4892 entry_bb = region->entry;
4893 exit_bb = region->exit;
4895 bool is_cilk_for
4896 = (flag_cilkplus
4897 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
4898 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
4899 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
4901 if (is_cilk_for)
4902 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
4903 and the inner statement contains the name of the built-in function
4904 and grain. */
4905 ws_args = region->inner->ws_args;
4906 else if (is_combined_parallel (region))
4907 ws_args = region->ws_args;
4908 else
4909 ws_args = NULL;
4911 if (child_cfun->cfg)
4913 /* Due to inlining, it may happen that we have already outlined
4914 the region, in which case all we need to do is make the
4915 sub-graph unreachable and emit the parallel call. */
4916 edge entry_succ_e, exit_succ_e;
4918 entry_succ_e = single_succ_edge (entry_bb);
4920 gsi = gsi_last_bb (entry_bb);
4921 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4922 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4923 gsi_remove (&gsi, true);
4925 new_bb = entry_bb;
4926 if (exit_bb)
4928 exit_succ_e = single_succ_edge (exit_bb);
4929 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4931 remove_edge_and_dominated_blocks (entry_succ_e);
4933 else
4935 unsigned srcidx, dstidx, num;
4937 /* If the parallel region needs data sent from the parent
4938 function, then the very first statement (except possible
4939 tree profile counter updates) of the parallel body
4940 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4941 &.OMP_DATA_O is passed as an argument to the child function,
4942 we need to replace it with the argument as seen by the child
4943 function.
4945 In most cases, this will end up being the identity assignment
4946 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4947 a function call that has been inlined, the original PARM_DECL
4948 .OMP_DATA_I may have been converted into a different local
4949 variable, in which case we need to keep the assignment. */
4950 if (gimple_omp_taskreg_data_arg (entry_stmt))
4952 basic_block entry_succ_bb = single_succ (entry_bb);
4953 tree arg, narg;
4954 gimple parcopy_stmt = NULL;
4956 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4958 gimple stmt;
4960 gcc_assert (!gsi_end_p (gsi));
4961 stmt = gsi_stmt (gsi);
4962 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4963 continue;
4965 if (gimple_num_ops (stmt) == 2)
4967 tree arg = gimple_assign_rhs1 (stmt);
4969 /* We're ignoring the subcode because we're
4970 effectively doing a STRIP_NOPS. */
4972 if (TREE_CODE (arg) == ADDR_EXPR
4973 && TREE_OPERAND (arg, 0)
4974 == gimple_omp_taskreg_data_arg (entry_stmt))
4976 parcopy_stmt = stmt;
4977 break;
4982 gcc_assert (parcopy_stmt != NULL);
4983 arg = DECL_ARGUMENTS (child_fn);
4985 if (!gimple_in_ssa_p (cfun))
4987 if (gimple_assign_lhs (parcopy_stmt) == arg)
4988 gsi_remove (&gsi, true);
4989 else
4991 /* ?? Is setting the subcode really necessary ?? */
4992 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4993 gimple_assign_set_rhs1 (parcopy_stmt, arg);
4996 else
4998 /* If we are in ssa form, we must load the value from the default
4999 definition of the argument. That should not be defined yet,
5000 since the argument is never used uninitialized. */
5001 gcc_assert (ssa_default_def (cfun, arg) == NULL);
5002 narg = make_ssa_name (arg, gimple_build_nop ());
5003 set_ssa_default_def (cfun, arg, narg);
5004 /* ?? Is setting the subcode really necessary ?? */
5005 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
5006 gimple_assign_set_rhs1 (parcopy_stmt, narg);
5007 update_stmt (parcopy_stmt);
5011 /* Declare local variables needed in CHILD_CFUN. */
5012 block = DECL_INITIAL (child_fn);
5013 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
5014 /* The gimplifier could record temporaries in parallel/task block
5015 rather than in containing function's local_decls chain,
5016 which would mean cgraph missed finalizing them. Do it now. */
5017 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
5018 if (TREE_CODE (t) == VAR_DECL
5019 && TREE_STATIC (t)
5020 && !DECL_EXTERNAL (t))
5021 varpool_node::finalize_decl (t);
5022 DECL_SAVED_TREE (child_fn) = NULL;
5023 /* We'll create a CFG for child_fn, so no gimple body is needed. */
5024 gimple_set_body (child_fn, NULL);
5025 TREE_USED (block) = 1;
5027 /* Reset DECL_CONTEXT on function arguments. */
5028 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
5029 DECL_CONTEXT (t) = child_fn;
5031 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
5032 so that it can be moved to the child function. */
5033 gsi = gsi_last_bb (entry_bb);
5034 stmt = gsi_stmt (gsi);
5035 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
5036 || gimple_code (stmt) == GIMPLE_OMP_TASK));
5037 gsi_remove (&gsi, true);
5038 e = split_block (entry_bb, stmt);
5039 entry_bb = e->dest;
5040 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5042 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
5043 if (exit_bb)
5045 gsi = gsi_last_bb (exit_bb);
5046 gcc_assert (!gsi_end_p (gsi)
5047 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5048 stmt = gimple_build_return (NULL);
5049 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5050 gsi_remove (&gsi, true);
5053 /* Move the parallel region into CHILD_CFUN. */
5055 if (gimple_in_ssa_p (cfun))
5057 init_tree_ssa (child_cfun);
5058 init_ssa_operands (child_cfun);
5059 child_cfun->gimple_df->in_ssa_p = true;
5060 block = NULL_TREE;
5062 else
5063 block = gimple_block (entry_stmt);
5065 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
5066 if (exit_bb)
5067 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
5068 /* When the OMP expansion process cannot guarantee an up-to-date
5069 loop tree arrange for the child function to fixup loops. */
5070 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5071 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
5073 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5074 num = vec_safe_length (child_cfun->local_decls);
5075 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
5077 t = (*child_cfun->local_decls)[srcidx];
5078 if (DECL_CONTEXT (t) == cfun->decl)
5079 continue;
5080 if (srcidx != dstidx)
5081 (*child_cfun->local_decls)[dstidx] = t;
5082 dstidx++;
5084 if (dstidx != num)
5085 vec_safe_truncate (child_cfun->local_decls, dstidx);
5087 /* Inform the callgraph about the new function. */
5088 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
5089 cgraph_node::add_new_function (child_fn, true);
5091 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5092 fixed in a following pass. */
5093 push_cfun (child_cfun);
5094 if (optimize)
5095 optimize_omp_library_calls (entry_stmt);
5096 cgraph_edge::rebuild_edges ();
5098 /* Some EH regions might become dead, see PR34608. If
5099 pass_cleanup_cfg isn't the first pass to happen with the
5100 new child, these dead EH edges might cause problems.
5101 Clean them up now. */
5102 if (flag_exceptions)
5104 basic_block bb;
5105 bool changed = false;
5107 FOR_EACH_BB_FN (bb, cfun)
5108 changed |= gimple_purge_dead_eh_edges (bb);
5109 if (changed)
5110 cleanup_tree_cfg ();
5112 if (gimple_in_ssa_p (cfun))
5113 update_ssa (TODO_update_ssa);
5114 pop_cfun ();
5117 /* Emit a library call to launch the children threads. */
5118 if (is_cilk_for)
5119 expand_cilk_for_call (new_bb, entry_stmt, ws_args);
5120 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5121 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
5122 else
5123 expand_task_call (new_bb, entry_stmt);
5124 if (gimple_in_ssa_p (cfun))
5125 update_ssa (TODO_update_ssa_only_virtuals);
5129 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5130 of the combined collapse > 1 loop constructs, generate code like:
5131 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5132 if (cond3 is <)
5133 adj = STEP3 - 1;
5134 else
5135 adj = STEP3 + 1;
5136 count3 = (adj + N32 - N31) / STEP3;
5137 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5138 if (cond2 is <)
5139 adj = STEP2 - 1;
5140 else
5141 adj = STEP2 + 1;
5142 count2 = (adj + N22 - N21) / STEP2;
5143 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5144 if (cond1 is <)
5145 adj = STEP1 - 1;
5146 else
5147 adj = STEP1 + 1;
5148 count1 = (adj + N12 - N11) / STEP1;
5149 count = count1 * count2 * count3;
5150 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5151 count = 0;
5152 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5153 of the combined loop constructs, just initialize COUNTS array
5154 from the _looptemp_ clauses. */
5156 /* NOTE: It *could* be better to moosh all of the BBs together,
5157 creating one larger BB with all the computation and the unexpected
5158 jump at the end. I.e.
5160 bool zero3, zero2, zero1, zero;
5162 zero3 = N32 c3 N31;
5163 count3 = (N32 - N31) /[cl] STEP3;
5164 zero2 = N22 c2 N21;
5165 count2 = (N22 - N21) /[cl] STEP2;
5166 zero1 = N12 c1 N11;
5167 count1 = (N12 - N11) /[cl] STEP1;
5168 zero = zero3 || zero2 || zero1;
5169 count = count1 * count2 * count3;
5170 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5172 After all, we expect zero to be false, and thus we expect to have to
5173 evaluate all of the comparison expressions, so short-circuiting
5174 oughtn't be a win. Since the condition isn't protecting a
5175 denominator, we're not concerned about divide-by-zero, so we can
5176 fully evaluate count even if a numerator turned out to be wrong.
5178 It seems like putting this all together would create much better
5179 scheduling opportunities, and less pressure on the chip's branch
5180 predictor. */
5182 static void
5183 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5184 basic_block &entry_bb, tree *counts,
5185 basic_block &zero_iter_bb, int &first_zero_iter,
5186 basic_block &l2_dom_bb)
5188 tree t, type = TREE_TYPE (fd->loop.v);
5189 gimple stmt;
5190 edge e, ne;
5191 int i;
5193 /* Collapsed loops need work for expansion into SSA form. */
5194 gcc_assert (!gimple_in_ssa_p (cfun));
5196 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5197 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5199 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5200 isn't supposed to be handled, as the inner loop doesn't
5201 use it. */
5202 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5203 OMP_CLAUSE__LOOPTEMP_);
5204 gcc_assert (innerc);
5205 for (i = 0; i < fd->collapse; i++)
5207 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5208 OMP_CLAUSE__LOOPTEMP_);
5209 gcc_assert (innerc);
5210 if (i)
5211 counts[i] = OMP_CLAUSE_DECL (innerc);
5212 else
5213 counts[0] = NULL_TREE;
5215 return;
5218 for (i = 0; i < fd->collapse; i++)
5220 tree itype = TREE_TYPE (fd->loops[i].v);
5222 if (SSA_VAR_P (fd->loop.n2)
5223 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5224 fold_convert (itype, fd->loops[i].n1),
5225 fold_convert (itype, fd->loops[i].n2)))
5226 == NULL_TREE || !integer_onep (t)))
5228 tree n1, n2;
5229 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5230 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5231 true, GSI_SAME_STMT);
5232 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5233 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5234 true, GSI_SAME_STMT);
5235 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5236 NULL_TREE, NULL_TREE);
5237 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5238 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5239 expand_omp_regimplify_p, NULL, NULL)
5240 || walk_tree (gimple_cond_rhs_ptr (stmt),
5241 expand_omp_regimplify_p, NULL, NULL))
5243 *gsi = gsi_for_stmt (stmt);
5244 gimple_regimplify_operands (stmt, gsi);
5246 e = split_block (entry_bb, stmt);
5247 if (zero_iter_bb == NULL)
5249 first_zero_iter = i;
5250 zero_iter_bb = create_empty_bb (entry_bb);
5251 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5252 *gsi = gsi_after_labels (zero_iter_bb);
5253 stmt = gimple_build_assign (fd->loop.n2,
5254 build_zero_cst (type));
5255 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5256 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5257 entry_bb);
5259 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
5260 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5261 e->flags = EDGE_TRUE_VALUE;
5262 e->probability = REG_BR_PROB_BASE - ne->probability;
5263 if (l2_dom_bb == NULL)
5264 l2_dom_bb = entry_bb;
5265 entry_bb = e->dest;
5266 *gsi = gsi_last_bb (entry_bb);
5269 if (POINTER_TYPE_P (itype))
5270 itype = signed_type_for (itype);
5271 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5272 ? -1 : 1));
5273 t = fold_build2 (PLUS_EXPR, itype,
5274 fold_convert (itype, fd->loops[i].step), t);
5275 t = fold_build2 (PLUS_EXPR, itype, t,
5276 fold_convert (itype, fd->loops[i].n2));
5277 t = fold_build2 (MINUS_EXPR, itype, t,
5278 fold_convert (itype, fd->loops[i].n1));
5279 /* ?? We could probably use CEIL_DIV_EXPR instead of
5280 TRUNC_DIV_EXPR and adjust by hand. Unless we can't
5281 generate the same code in the end because generically we
5282 don't know that the values involved must be negative for
5283 GT?? */
5284 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5285 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5286 fold_build1 (NEGATE_EXPR, itype, t),
5287 fold_build1 (NEGATE_EXPR, itype,
5288 fold_convert (itype,
5289 fd->loops[i].step)));
5290 else
5291 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5292 fold_convert (itype, fd->loops[i].step));
5293 t = fold_convert (type, t);
5294 if (TREE_CODE (t) == INTEGER_CST)
5295 counts[i] = t;
5296 else
5298 counts[i] = create_tmp_reg (type, ".count");
5299 expand_omp_build_assign (gsi, counts[i], t);
5301 if (SSA_VAR_P (fd->loop.n2))
5303 if (i == 0)
5304 t = counts[0];
5305 else
5306 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5307 expand_omp_build_assign (gsi, fd->loop.n2, t);
5313 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5314 T = V;
5315 V3 = N31 + (T % count3) * STEP3;
5316 T = T / count3;
5317 V2 = N21 + (T % count2) * STEP2;
5318 T = T / count2;
5319 V1 = N11 + T * STEP1;
5320 if this loop doesn't have an inner loop construct combined with it.
5321 If it does have an inner loop construct combined with it and the
5322 iteration count isn't known constant, store values from counts array
5323 into its _looptemp_ temporaries instead. */
5325 static void
5326 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5327 tree *counts, gimple inner_stmt, tree startvar)
5329 int i;
5330 if (gimple_omp_for_combined_p (fd->for_stmt))
5332 /* If fd->loop.n2 is constant, then no propagation of the counts
5333 is needed; they are constant. */
5334 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5335 return;
5337 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5338 ? gimple_omp_parallel_clauses (inner_stmt)
5339 : gimple_omp_for_clauses (inner_stmt);
5340 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5341 isn't supposed to be handled, as the inner loop doesn't
5342 use it. */
5343 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5344 gcc_assert (innerc);
5345 for (i = 0; i < fd->collapse; i++)
5347 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5348 OMP_CLAUSE__LOOPTEMP_);
5349 gcc_assert (innerc);
5350 if (i)
5352 tree tem = OMP_CLAUSE_DECL (innerc);
5353 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5354 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5355 false, GSI_CONTINUE_LINKING);
5356 gimple stmt = gimple_build_assign (tem, t);
5357 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5360 return;
5363 tree type = TREE_TYPE (fd->loop.v);
5364 tree tem = create_tmp_reg (type, ".tem");
5365 gimple stmt = gimple_build_assign (tem, startvar);
5366 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5368 for (i = fd->collapse - 1; i >= 0; i--)
5370 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5371 itype = vtype;
5372 if (POINTER_TYPE_P (vtype))
5373 itype = signed_type_for (vtype);
5374 if (i != 0)
5375 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5376 else
5377 t = tem;
5378 t = fold_convert (itype, t);
5379 t = fold_build2 (MULT_EXPR, itype, t,
5380 fold_convert (itype, fd->loops[i].step));
5381 if (POINTER_TYPE_P (vtype))
5382 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5383 else
5384 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5385 t = force_gimple_operand_gsi (gsi, t,
5386 DECL_P (fd->loops[i].v)
5387 && TREE_ADDRESSABLE (fd->loops[i].v),
5388 NULL_TREE, false,
5389 GSI_CONTINUE_LINKING);
5390 stmt = gimple_build_assign (fd->loops[i].v, t);
5391 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5392 if (i != 0)
5394 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5395 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5396 false, GSI_CONTINUE_LINKING);
5397 stmt = gimple_build_assign (tem, t);
5398 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5404 /* Helper function for expand_omp_for_*. Generate code like:
5405 L10:
5406 V3 += STEP3;
5407 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5408 L11:
5409 V3 = N31;
5410 V2 += STEP2;
5411 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5412 L12:
5413 V2 = N21;
5414 V1 += STEP1;
5415 goto BODY_BB; */
5417 static basic_block
5418 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5419 basic_block body_bb)
5421 basic_block last_bb, bb, collapse_bb = NULL;
5422 int i;
5423 gimple_stmt_iterator gsi;
5424 edge e;
5425 tree t;
5426 gimple stmt;
5428 last_bb = cont_bb;
5429 for (i = fd->collapse - 1; i >= 0; i--)
5431 tree vtype = TREE_TYPE (fd->loops[i].v);
5433 bb = create_empty_bb (last_bb);
5434 add_bb_to_loop (bb, last_bb->loop_father);
5435 gsi = gsi_start_bb (bb);
5437 if (i < fd->collapse - 1)
5439 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5440 e->probability = REG_BR_PROB_BASE / 8;
5442 t = fd->loops[i + 1].n1;
5443 t = force_gimple_operand_gsi (&gsi, t,
5444 DECL_P (fd->loops[i + 1].v)
5445 && TREE_ADDRESSABLE (fd->loops[i
5446 + 1].v),
5447 NULL_TREE, false,
5448 GSI_CONTINUE_LINKING);
5449 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5450 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5452 else
5453 collapse_bb = bb;
5455 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5457 if (POINTER_TYPE_P (vtype))
5458 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5459 else
5460 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5461 t = force_gimple_operand_gsi (&gsi, t,
5462 DECL_P (fd->loops[i].v)
5463 && TREE_ADDRESSABLE (fd->loops[i].v),
5464 NULL_TREE, false, GSI_CONTINUE_LINKING);
5465 stmt = gimple_build_assign (fd->loops[i].v, t);
5466 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5468 if (i > 0)
5470 t = fd->loops[i].n2;
5471 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5472 false, GSI_CONTINUE_LINKING);
5473 tree v = fd->loops[i].v;
5474 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5475 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5476 false, GSI_CONTINUE_LINKING);
5477 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5478 stmt = gimple_build_cond_empty (t);
5479 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5480 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5481 e->probability = REG_BR_PROB_BASE * 7 / 8;
5483 else
5484 make_edge (bb, body_bb, EDGE_FALLTHRU);
5485 last_bb = bb;
5488 return collapse_bb;
5492 /* A subroutine of expand_omp_for. Generate code for a parallel
5493 loop with any schedule. Given parameters:
5495 for (V = N1; V cond N2; V += STEP) BODY;
5497 where COND is "<" or ">", we generate pseudocode
5499 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5500 if (more) goto L0; else goto L3;
5502 V = istart0;
5503 iend = iend0;
5505 BODY;
5506 V += STEP;
5507 if (V cond iend) goto L1; else goto L2;
5509 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5512 If this is a combined omp parallel loop, instead of the call to
5513 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5514 If this is gimple_omp_for_combined_p loop, then instead of assigning
5515 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5516 inner GIMPLE_OMP_FOR and V += STEP; and
5517 if (V cond iend) goto L1; else goto L2; are removed.
5519 For collapsed loops, given parameters:
5520 collapse(3)
5521 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5522 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5523 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5524 BODY;
5526 we generate pseudocode
5528 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5529 if (cond3 is <)
5530 adj = STEP3 - 1;
5531 else
5532 adj = STEP3 + 1;
5533 count3 = (adj + N32 - N31) / STEP3;
5534 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5535 if (cond2 is <)
5536 adj = STEP2 - 1;
5537 else
5538 adj = STEP2 + 1;
5539 count2 = (adj + N22 - N21) / STEP2;
5540 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5541 if (cond1 is <)
5542 adj = STEP1 - 1;
5543 else
5544 adj = STEP1 + 1;
5545 count1 = (adj + N12 - N11) / STEP1;
5546 count = count1 * count2 * count3;
5547 goto Z1;
5549 count = 0;
5551 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5552 if (more) goto L0; else goto L3;
5554 V = istart0;
5555 T = V;
5556 V3 = N31 + (T % count3) * STEP3;
5557 T = T / count3;
5558 V2 = N21 + (T % count2) * STEP2;
5559 T = T / count2;
5560 V1 = N11 + T * STEP1;
5561 iend = iend0;
5563 BODY;
5564 V += 1;
5565 if (V < iend) goto L10; else goto L2;
5566 L10:
5567 V3 += STEP3;
5568 if (V3 cond3 N32) goto L1; else goto L11;
5569 L11:
5570 V3 = N31;
5571 V2 += STEP2;
5572 if (V2 cond2 N22) goto L1; else goto L12;
5573 L12:
5574 V2 = N21;
5575 V1 += STEP1;
5576 goto L1;
5578 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5583 static void
5584 expand_omp_for_generic (struct omp_region *region,
5585 struct omp_for_data *fd,
5586 enum built_in_function start_fn,
5587 enum built_in_function next_fn,
5588 gimple inner_stmt)
5590 tree type, istart0, iend0, iend;
5591 tree t, vmain, vback, bias = NULL_TREE;
5592 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5593 basic_block l2_bb = NULL, l3_bb = NULL;
5594 gimple_stmt_iterator gsi;
5595 gimple stmt;
5596 bool in_combined_parallel = is_combined_parallel (region);
5597 bool broken_loop = region->cont == NULL;
5598 edge e, ne;
5599 tree *counts = NULL;
5600 int i;
5602 gcc_assert (!broken_loop || !in_combined_parallel);
5603 gcc_assert (fd->iter_type == long_integer_type_node
5604 || !in_combined_parallel);
5606 type = TREE_TYPE (fd->loop.v);
5607 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5608 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5609 TREE_ADDRESSABLE (istart0) = 1;
5610 TREE_ADDRESSABLE (iend0) = 1;
5612 /* See if we need to bias by LLONG_MIN. */
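  /* Roughly, the bias maps the signed iteration space onto an unsigned
     one: adding TYPE_MIN_VALUE of the IV type (modulo the type's
     precision) shifts [MIN, MAX] onto an unsigned range, so the
     unsigned comparisons done by the GOMP_loop_ull_* routines still
     order the iterations correctly. */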
5613 if (fd->iter_type == long_long_unsigned_type_node
5614 && TREE_CODE (type) == INTEGER_TYPE
5615 && !TYPE_UNSIGNED (type))
5617 tree n1, n2;
5619 if (fd->loop.cond_code == LT_EXPR)
5621 n1 = fd->loop.n1;
5622 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5624 else
5626 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5627 n2 = fd->loop.n1;
5629 if (TREE_CODE (n1) != INTEGER_CST
5630 || TREE_CODE (n2) != INTEGER_CST
5631 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5632 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5635 entry_bb = region->entry;
5636 cont_bb = region->cont;
5637 collapse_bb = NULL;
5638 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5639 gcc_assert (broken_loop
5640 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5641 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5642 l1_bb = single_succ (l0_bb);
5643 if (!broken_loop)
5645 l2_bb = create_empty_bb (cont_bb);
5646 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5647 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5649 else
5650 l2_bb = NULL;
5651 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5652 exit_bb = region->exit;
5654 gsi = gsi_last_bb (entry_bb);
5656 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5657 if (fd->collapse > 1)
5659 int first_zero_iter = -1;
5660 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5662 counts = XALLOCAVEC (tree, fd->collapse);
5663 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5664 zero_iter_bb, first_zero_iter,
5665 l2_dom_bb);
5667 if (zero_iter_bb)
5669 /* Some counts[i] vars might be uninitialized if
5670 some loop has zero iterations. But the body shouldn't
5671 be executed in that case, so just avoid uninit warnings. */
5672 for (i = first_zero_iter; i < fd->collapse; i++)
5673 if (SSA_VAR_P (counts[i]))
5674 TREE_NO_WARNING (counts[i]) = 1;
5675 gsi_prev (&gsi);
5676 e = split_block (entry_bb, gsi_stmt (gsi));
5677 entry_bb = e->dest;
5678 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5679 gsi = gsi_last_bb (entry_bb);
5680 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5681 get_immediate_dominator (CDI_DOMINATORS,
5682 zero_iter_bb));
5685 if (in_combined_parallel)
5687 /* In a combined parallel loop, emit a call to
5688 GOMP_loop_foo_next. */
5689 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5690 build_fold_addr_expr (istart0),
5691 build_fold_addr_expr (iend0));
5693 else
5695 tree t0, t1, t2, t3, t4;
5696 /* If this is not a combined parallel loop, emit a call to
5697 GOMP_loop_foo_start in ENTRY_BB. */
5698 t4 = build_fold_addr_expr (iend0);
5699 t3 = build_fold_addr_expr (istart0);
5700 t2 = fold_convert (fd->iter_type, fd->loop.step);
5701 t1 = fd->loop.n2;
5702 t0 = fd->loop.n1;
5703 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5705 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5706 OMP_CLAUSE__LOOPTEMP_);
5707 gcc_assert (innerc);
5708 t0 = OMP_CLAUSE_DECL (innerc);
5709 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5710 OMP_CLAUSE__LOOPTEMP_);
5711 gcc_assert (innerc);
5712 t1 = OMP_CLAUSE_DECL (innerc);
5714 if (POINTER_TYPE_P (TREE_TYPE (t0))
5715 && TYPE_PRECISION (TREE_TYPE (t0))
5716 != TYPE_PRECISION (fd->iter_type))
5718 /* Avoid casting pointers to integer of a different size. */
5719 tree itype = signed_type_for (type);
5720 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5721 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5723 else
5725 t1 = fold_convert (fd->iter_type, t1);
5726 t0 = fold_convert (fd->iter_type, t0);
5728 if (bias)
5730 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5731 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5733 if (fd->iter_type == long_integer_type_node)
5735 if (fd->chunk_size)
5737 t = fold_convert (fd->iter_type, fd->chunk_size);
5738 t = build_call_expr (builtin_decl_explicit (start_fn),
5739 6, t0, t1, t2, t, t3, t4);
5741 else
5742 t = build_call_expr (builtin_decl_explicit (start_fn),
5743 5, t0, t1, t2, t3, t4);
5745 else
5747 tree t5;
5748 tree c_bool_type;
5749 tree bfn_decl;
5751 /* The GOMP_loop_ull_*start functions have an additional boolean
5752 argument, true for < loops and false for > loops.
5753 In Fortran, the C bool type can be different from
5754 boolean_type_node. */
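/* For reference, the matching start entry point in libgomp has this
   shape (again an illustration for the dynamic flavor, not a
   declaration used here):

     bool GOMP_loop_ull_dynamic_start (bool up,
                                       unsigned long long start,
                                       unsigned long long end,
                                       unsigned long long incr,
                                       unsigned long long chunk_size,
                                       unsigned long long *istart,
                                       unsigned long long *iend);

   UP is the extra boolean argument (T5) described above.  */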
5755 bfn_decl = builtin_decl_explicit (start_fn);
5756 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5757 t5 = build_int_cst (c_bool_type,
5758 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5759 if (fd->chunk_size)
5761 tree bfn_decl = builtin_decl_explicit (start_fn);
5762 t = fold_convert (fd->iter_type, fd->chunk_size);
5763 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5765 else
5766 t = build_call_expr (builtin_decl_explicit (start_fn),
5767 6, t5, t0, t1, t2, t3, t4);
5770 if (TREE_TYPE (t) != boolean_type_node)
5771 t = fold_build2 (NE_EXPR, boolean_type_node,
5772 t, build_int_cst (TREE_TYPE (t), 0));
5773 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5774 true, GSI_SAME_STMT);
5775 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5777 /* Remove the GIMPLE_OMP_FOR statement. */
5778 gsi_remove (&gsi, true);
5780 /* Iteration setup for the sequential loop goes in L0_BB. */
5781 tree startvar = fd->loop.v;
5782 tree endvar = NULL_TREE;
5784 if (gimple_omp_for_combined_p (fd->for_stmt))
5786 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5787 && gimple_omp_for_kind (inner_stmt)
5788 == GF_OMP_FOR_KIND_SIMD);
5789 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5790 OMP_CLAUSE__LOOPTEMP_);
5791 gcc_assert (innerc);
5792 startvar = OMP_CLAUSE_DECL (innerc);
5793 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5794 OMP_CLAUSE__LOOPTEMP_);
5795 gcc_assert (innerc);
5796 endvar = OMP_CLAUSE_DECL (innerc);
5799 gsi = gsi_start_bb (l0_bb);
5800 t = istart0;
5801 if (bias)
5802 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5803 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5804 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5805 t = fold_convert (TREE_TYPE (startvar), t);
5806 t = force_gimple_operand_gsi (&gsi, t,
5807 DECL_P (startvar)
5808 && TREE_ADDRESSABLE (startvar),
5809 NULL_TREE, false, GSI_CONTINUE_LINKING);
5810 stmt = gimple_build_assign (startvar, t);
5811 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5813 t = iend0;
5814 if (bias)
5815 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5816 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5817 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5818 t = fold_convert (TREE_TYPE (startvar), t);
5819 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5820 false, GSI_CONTINUE_LINKING);
5821 if (endvar)
5823 stmt = gimple_build_assign (endvar, iend);
5824 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5825 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
5826 stmt = gimple_build_assign (fd->loop.v, iend);
5827 else
5828 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, iend,
5829 NULL_TREE);
5830 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5832 if (fd->collapse > 1)
5833 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5835 if (!broken_loop)
5837 /* Code to control the increment and predicate for the sequential
5838 loop goes in the CONT_BB. */
5839 gsi = gsi_last_bb (cont_bb);
5840 stmt = gsi_stmt (gsi);
5841 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5842 vmain = gimple_omp_continue_control_use (stmt);
5843 vback = gimple_omp_continue_control_def (stmt);
5845 if (!gimple_omp_for_combined_p (fd->for_stmt))
5847 if (POINTER_TYPE_P (type))
5848 t = fold_build_pointer_plus (vmain, fd->loop.step);
5849 else
5850 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5851 t = force_gimple_operand_gsi (&gsi, t,
5852 DECL_P (vback)
5853 && TREE_ADDRESSABLE (vback),
5854 NULL_TREE, true, GSI_SAME_STMT);
5855 stmt = gimple_build_assign (vback, t);
5856 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5858 t = build2 (fd->loop.cond_code, boolean_type_node,
5859 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5860 iend);
5861 stmt = gimple_build_cond_empty (t);
5862 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5865 /* Remove GIMPLE_OMP_CONTINUE. */
5866 gsi_remove (&gsi, true);
5868 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5869 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5871 /* Emit code to get the next parallel iteration in L2_BB. */
5872 gsi = gsi_start_bb (l2_bb);
5874 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5875 build_fold_addr_expr (istart0),
5876 build_fold_addr_expr (iend0));
5877 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5878 false, GSI_CONTINUE_LINKING);
5879 if (TREE_TYPE (t) != boolean_type_node)
5880 t = fold_build2 (NE_EXPR, boolean_type_node,
5881 t, build_int_cst (TREE_TYPE (t), 0));
5882 stmt = gimple_build_cond_empty (t);
5883 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5886 /* Add the loop cleanup function. */
5887 gsi = gsi_last_bb (exit_bb);
5888 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5889 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5890 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5891 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5892 else
5893 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5894 stmt = gimple_build_call (t, 0);
5895 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5896 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5897 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5898 gsi_remove (&gsi, true);
5900 /* Connect the new blocks. */
5901 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5902 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5904 if (!broken_loop)
5906 gimple_seq phis;
5908 e = find_edge (cont_bb, l3_bb);
5909 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5911 phis = phi_nodes (l3_bb);
5912 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5914 gimple phi = gsi_stmt (gsi);
5915 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5916 PHI_ARG_DEF_FROM_EDGE (phi, e));
5918 remove_edge (e);
5920 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5921 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5922 e = find_edge (cont_bb, l1_bb);
5923 if (gimple_omp_for_combined_p (fd->for_stmt))
5925 remove_edge (e);
5926 e = NULL;
5928 else if (fd->collapse > 1)
5930 remove_edge (e);
5931 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5933 else
5934 e->flags = EDGE_TRUE_VALUE;
5935 if (e)
5937 e->probability = REG_BR_PROB_BASE * 7 / 8;
5938 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5940 else
5942 e = find_edge (cont_bb, l2_bb);
5943 e->flags = EDGE_FALLTHRU;
5945 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5947 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5948 recompute_dominator (CDI_DOMINATORS, l2_bb));
5949 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5950 recompute_dominator (CDI_DOMINATORS, l3_bb));
5951 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5952 recompute_dominator (CDI_DOMINATORS, l0_bb));
5953 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5954 recompute_dominator (CDI_DOMINATORS, l1_bb));
5956 struct loop *outer_loop = alloc_loop ();
5957 outer_loop->header = l0_bb;
5958 outer_loop->latch = l2_bb;
5959 add_loop (outer_loop, l0_bb->loop_father);
5961 if (!gimple_omp_for_combined_p (fd->for_stmt))
5963 struct loop *loop = alloc_loop ();
5964 loop->header = l1_bb;
5965 /* The loop may have multiple latches. */
5966 add_loop (loop, outer_loop);
5972 /* A subroutine of expand_omp_for. Generate code for a parallel
5973 loop with static schedule and no specified chunk size. Given
5974 parameters:
5976 for (V = N1; V cond N2; V += STEP) BODY;
5978 where COND is "<" or ">", we generate pseudocode
5980 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5981 if (cond is <)
5982 adj = STEP - 1;
5983 else
5984 adj = STEP + 1;
5985 if ((__typeof (V)) -1 > 0 && cond is >)
5986 n = -(adj + N2 - N1) / -STEP;
5987 else
5988 n = (adj + N2 - N1) / STEP;
5989 q = n / nthreads;
5990 tt = n % nthreads;
5991 if (threadid < tt) goto L3; else goto L4;
5992 L3:
5993 tt = 0;
5994 q = q + 1;
5995 L4:
5996 s0 = q * threadid + tt;
5997 e0 = s0 + q;
5998 V = s0 * STEP + N1;
5999 if (s0 >= e0) goto L2; else goto L0;
6000 L0:
6001 e = e0 * STEP + N1;
6002 L1:
6003 BODY;
6004 V += STEP;
6005 if (V cond e) goto L1;
6006 L2:
6007 */
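/* Illustrative sketch, not compiler code: the partitioning above is
   equivalent to the following plain C, for a loop of N logical
   iterations split across NTHREADS threads (all names here are
   hypothetical):

     static void
     static_nochunk_range (long n, long nthreads, long tid,
                           long *s0, long *e0)
     {
       long q = n / nthreads;   -- base share per thread
       long tt = n % nthreads;  -- first TT threads get one extra
       if (tid < tt)
         {
           tt = 0;
           q++;
         }
       *s0 = q * tid + tt;      -- first iteration owned by TID
       *e0 = *s0 + q;           -- one past the last
     }

   E.g. n = 10, nthreads = 4 yields the ranges [0,3) [3,6) [6,8)
   [8,10).  */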
6009 static void
6010 expand_omp_for_static_nochunk (struct omp_region *region,
6011 struct omp_for_data *fd,
6012 gimple inner_stmt)
6014 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
6015 tree type, itype, vmain, vback;
6016 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
6017 basic_block body_bb, cont_bb, collapse_bb = NULL;
6018 basic_block fin_bb;
6019 gimple_stmt_iterator gsi;
6020 gimple stmt;
6021 edge ep;
6022 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6023 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6024 bool broken_loop = region->cont == NULL;
6025 tree *counts = NULL;
6026 tree n1, n2, step;
6028 itype = type = TREE_TYPE (fd->loop.v);
6029 if (POINTER_TYPE_P (type))
6030 itype = signed_type_for (type);
6032 entry_bb = region->entry;
6033 cont_bb = region->cont;
6034 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6035 fin_bb = BRANCH_EDGE (entry_bb)->dest;
6036 gcc_assert (broken_loop
6037 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
6038 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6039 body_bb = single_succ (seq_start_bb);
6040 if (!broken_loop)
6042 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6043 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6045 exit_bb = region->exit;
6047 /* Iteration space partitioning goes in ENTRY_BB. */
6048 gsi = gsi_last_bb (entry_bb);
6049 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6051 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6053 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6054 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6057 if (fd->collapse > 1)
6059 int first_zero_iter = -1;
6060 basic_block l2_dom_bb = NULL;
6062 counts = XALLOCAVEC (tree, fd->collapse);
6063 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6064 fin_bb, first_zero_iter,
6065 l2_dom_bb);
6066 t = NULL_TREE;
6068 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6069 t = integer_one_node;
6070 else
6071 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6072 fold_convert (type, fd->loop.n1),
6073 fold_convert (type, fd->loop.n2));
6074 if (fd->collapse == 1
6075 && TYPE_UNSIGNED (type)
6076 && (t == NULL_TREE || !integer_onep (t)))
6078 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6079 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6080 true, GSI_SAME_STMT);
6081 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6082 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6083 true, GSI_SAME_STMT);
6084 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6085 NULL_TREE, NULL_TREE);
6086 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6087 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6088 expand_omp_regimplify_p, NULL, NULL)
6089 || walk_tree (gimple_cond_rhs_ptr (stmt),
6090 expand_omp_regimplify_p, NULL, NULL))
6092 gsi = gsi_for_stmt (stmt);
6093 gimple_regimplify_operands (stmt, &gsi);
6095 ep = split_block (entry_bb, stmt);
6096 ep->flags = EDGE_TRUE_VALUE;
6097 entry_bb = ep->dest;
6098 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6099 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
6100 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
6101 if (gimple_in_ssa_p (cfun))
6103 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6104 for (gsi = gsi_start_phis (fin_bb);
6105 !gsi_end_p (gsi); gsi_next (&gsi))
6107 gimple phi = gsi_stmt (gsi);
6108 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6109 ep, UNKNOWN_LOCATION);
6112 gsi = gsi_last_bb (entry_bb);
6115 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6116 t = fold_convert (itype, t);
6117 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6118 true, GSI_SAME_STMT);
6120 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6121 t = fold_convert (itype, t);
6122 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6123 true, GSI_SAME_STMT);
6125 n1 = fd->loop.n1;
6126 n2 = fd->loop.n2;
6127 step = fd->loop.step;
6128 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6130 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6131 OMP_CLAUSE__LOOPTEMP_);
6132 gcc_assert (innerc);
6133 n1 = OMP_CLAUSE_DECL (innerc);
6134 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6135 OMP_CLAUSE__LOOPTEMP_);
6136 gcc_assert (innerc);
6137 n2 = OMP_CLAUSE_DECL (innerc);
6139 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6140 true, NULL_TREE, true, GSI_SAME_STMT);
6141 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6142 true, NULL_TREE, true, GSI_SAME_STMT);
6143 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6144 true, NULL_TREE, true, GSI_SAME_STMT);
6146 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6147 t = fold_build2 (PLUS_EXPR, itype, step, t);
6148 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6149 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6150 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6151 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6152 fold_build1 (NEGATE_EXPR, itype, t),
6153 fold_build1 (NEGATE_EXPR, itype, step));
6154 else
6155 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6156 t = fold_convert (itype, t);
6157 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6159 q = create_tmp_reg (itype, "q");
6160 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
6161 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6162 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
6164 tt = create_tmp_reg (itype, "tt");
6165 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
6166 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6167 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
6169 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
6170 stmt = gimple_build_cond_empty (t);
6171 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6173 second_bb = split_block (entry_bb, stmt)->dest;
6174 gsi = gsi_last_bb (second_bb);
6175 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6177 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
6178 GSI_SAME_STMT);
6179 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
6180 build_int_cst (itype, 1));
6181 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6183 third_bb = split_block (second_bb, stmt)->dest;
6184 gsi = gsi_last_bb (third_bb);
6185 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6187 t = build2 (MULT_EXPR, itype, q, threadid);
6188 t = build2 (PLUS_EXPR, itype, t, tt);
6189 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6191 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6192 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6194 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6195 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6197 /* Remove the GIMPLE_OMP_FOR statement. */
6198 gsi_remove (&gsi, true);
6200 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6201 gsi = gsi_start_bb (seq_start_bb);
6203 tree startvar = fd->loop.v;
6204 tree endvar = NULL_TREE;
6206 if (gimple_omp_for_combined_p (fd->for_stmt))
6208 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6209 ? gimple_omp_parallel_clauses (inner_stmt)
6210 : gimple_omp_for_clauses (inner_stmt);
6211 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6212 gcc_assert (innerc);
6213 startvar = OMP_CLAUSE_DECL (innerc);
6214 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6215 OMP_CLAUSE__LOOPTEMP_);
6216 gcc_assert (innerc);
6217 endvar = OMP_CLAUSE_DECL (innerc);
6219 t = fold_convert (itype, s0);
6220 t = fold_build2 (MULT_EXPR, itype, t, step);
6221 if (POINTER_TYPE_P (type))
6222 t = fold_build_pointer_plus (n1, t);
6223 else
6224 t = fold_build2 (PLUS_EXPR, type, t, n1);
6225 t = fold_convert (TREE_TYPE (startvar), t);
6226 t = force_gimple_operand_gsi (&gsi, t,
6227 DECL_P (startvar)
6228 && TREE_ADDRESSABLE (startvar),
6229 NULL_TREE, false, GSI_CONTINUE_LINKING);
6230 stmt = gimple_build_assign (startvar, t);
6231 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6233 t = fold_convert (itype, e0);
6234 t = fold_build2 (MULT_EXPR, itype, t, step);
6235 if (POINTER_TYPE_P (type))
6236 t = fold_build_pointer_plus (n1, t);
6237 else
6238 t = fold_build2 (PLUS_EXPR, type, t, n1);
6239 t = fold_convert (TREE_TYPE (startvar), t);
6240 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6241 false, GSI_CONTINUE_LINKING);
6242 if (endvar)
6244 stmt = gimple_build_assign (endvar, e);
6245 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6246 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6247 stmt = gimple_build_assign (fd->loop.v, e);
6248 else
6249 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6250 NULL_TREE);
6251 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6253 if (fd->collapse > 1)
6254 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6256 if (!broken_loop)
6258 /* The code controlling the sequential loop replaces the
6259 GIMPLE_OMP_CONTINUE. */
6260 gsi = gsi_last_bb (cont_bb);
6261 stmt = gsi_stmt (gsi);
6262 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6263 vmain = gimple_omp_continue_control_use (stmt);
6264 vback = gimple_omp_continue_control_def (stmt);
6266 if (!gimple_omp_for_combined_p (fd->for_stmt))
6268 if (POINTER_TYPE_P (type))
6269 t = fold_build_pointer_plus (vmain, step);
6270 else
6271 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6272 t = force_gimple_operand_gsi (&gsi, t,
6273 DECL_P (vback)
6274 && TREE_ADDRESSABLE (vback),
6275 NULL_TREE, true, GSI_SAME_STMT);
6276 stmt = gimple_build_assign (vback, t);
6277 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6279 t = build2 (fd->loop.cond_code, boolean_type_node,
6280 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6281 ? t : vback, e);
6282 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6285 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6286 gsi_remove (&gsi, true);
6288 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6289 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6292 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6293 gsi = gsi_last_bb (exit_bb);
6294 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6296 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6297 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6299 gsi_remove (&gsi, true);
6301 /* Connect all the blocks. */
6302 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6303 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6304 ep = find_edge (entry_bb, second_bb);
6305 ep->flags = EDGE_TRUE_VALUE;
6306 ep->probability = REG_BR_PROB_BASE / 4;
6307 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6308 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6310 if (!broken_loop)
6312 ep = find_edge (cont_bb, body_bb);
6313 if (gimple_omp_for_combined_p (fd->for_stmt))
6315 remove_edge (ep);
6316 ep = NULL;
6318 else if (fd->collapse > 1)
6320 remove_edge (ep);
6321 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6323 else
6324 ep->flags = EDGE_TRUE_VALUE;
6325 find_edge (cont_bb, fin_bb)->flags
6326 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6329 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6330 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6331 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6333 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6334 recompute_dominator (CDI_DOMINATORS, body_bb));
6335 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6336 recompute_dominator (CDI_DOMINATORS, fin_bb));
6338 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6340 struct loop *loop = alloc_loop ();
6341 loop->header = body_bb;
6342 if (collapse_bb == NULL)
6343 loop->latch = cont_bb;
6344 add_loop (loop, body_bb->loop_father);
6349 /* A subroutine of expand_omp_for. Generate code for a parallel
6350 loop with static schedule and a specified chunk size. Given
6351 parameters:
6353 for (V = N1; V cond N2; V += STEP) BODY;
6355 where COND is "<" or ">", we generate pseudocode
6357 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6358 if (cond is <)
6359 adj = STEP - 1;
6360 else
6361 adj = STEP + 1;
6362 if ((__typeof (V)) -1 > 0 && cond is >)
6363 n = -(adj + N2 - N1) / -STEP;
6364 else
6365 n = (adj + N2 - N1) / STEP;
6366 trip = 0;
6367 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6368 here so that V is defined
6369 if the loop is not entered
6370 L0:
6371 s0 = (trip * nthreads + threadid) * CHUNK;
6372 e0 = min(s0 + CHUNK, n);
6373 if (s0 < n) goto L1; else goto L4;
6374 L1:
6375 V = s0 * STEP + N1;
6376 e = e0 * STEP + N1;
6377 L2:
6378 BODY;
6379 V += STEP;
6380 if (V cond e) goto L2; else goto L3;
6381 L3:
6382 trip += 1;
6383 goto L0;
6384 L4:
6385 */
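/* Illustrative sketch, not compiler code: the trip loop above hands
   chunks out round-robin, so each thread's share is equivalent to
   the following plain C (all names here are hypothetical):

     static void
     static_chunk_ranges (long n, long nthreads, long chunk, long tid)
     {
       for (long trip = 0; ; trip++)
         {
           long s0 = (trip * nthreads + tid) * chunk;
           long e0 = s0 + chunk < n ? s0 + chunk : n;
           if (s0 >= n)
             break;              -- no chunk left for TID
           -- iterations [s0, e0) run on TID during this trip
         }
     }

   E.g. n = 10, nthreads = 2, chunk = 3 gives thread 0 the ranges
   [0,3) and [6,9), and thread 1 the ranges [3,6) and [9,10).  */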
6387 static void
6388 expand_omp_for_static_chunk (struct omp_region *region,
6389 struct omp_for_data *fd, gimple inner_stmt)
6391 tree n, s0, e0, e, t;
6392 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6393 tree type, itype, vmain, vback, vextra;
6394 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6395 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6396 gimple_stmt_iterator gsi;
6397 gimple stmt;
6398 edge se;
6399 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6400 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6401 bool broken_loop = region->cont == NULL;
6402 tree *counts = NULL;
6403 tree n1, n2, step;
6405 itype = type = TREE_TYPE (fd->loop.v);
6406 if (POINTER_TYPE_P (type))
6407 itype = signed_type_for (type);
6409 entry_bb = region->entry;
6410 se = split_block (entry_bb, last_stmt (entry_bb));
6411 entry_bb = se->src;
6412 iter_part_bb = se->dest;
6413 cont_bb = region->cont;
6414 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6415 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6416 gcc_assert (broken_loop
6417 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6418 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6419 body_bb = single_succ (seq_start_bb);
6420 if (!broken_loop)
6422 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6423 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6424 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6426 exit_bb = region->exit;
6428 /* Trip and adjustment setup goes in ENTRY_BB. */
6429 gsi = gsi_last_bb (entry_bb);
6430 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6432 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6434 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6435 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6438 if (fd->collapse > 1)
6440 int first_zero_iter = -1;
6441 basic_block l2_dom_bb = NULL;
6443 counts = XALLOCAVEC (tree, fd->collapse);
6444 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6445 fin_bb, first_zero_iter,
6446 l2_dom_bb);
6447 t = NULL_TREE;
6449 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6450 t = integer_one_node;
6451 else
6452 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6453 fold_convert (type, fd->loop.n1),
6454 fold_convert (type, fd->loop.n2));
6455 if (fd->collapse == 1
6456 && TYPE_UNSIGNED (type)
6457 && (t == NULL_TREE || !integer_onep (t)))
6459 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6460 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6461 true, GSI_SAME_STMT);
6462 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6463 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6464 true, GSI_SAME_STMT);
6465 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6466 NULL_TREE, NULL_TREE);
6467 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6468 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6469 expand_omp_regimplify_p, NULL, NULL)
6470 || walk_tree (gimple_cond_rhs_ptr (stmt),
6471 expand_omp_regimplify_p, NULL, NULL))
6473 gsi = gsi_for_stmt (stmt);
6474 gimple_regimplify_operands (stmt, &gsi);
6476 se = split_block (entry_bb, stmt);
6477 se->flags = EDGE_TRUE_VALUE;
6478 entry_bb = se->dest;
6479 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6480 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6481 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6482 if (gimple_in_ssa_p (cfun))
6484 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6485 for (gsi = gsi_start_phis (fin_bb);
6486 !gsi_end_p (gsi); gsi_next (&gsi))
6488 gimple phi = gsi_stmt (gsi);
6489 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6490 se, UNKNOWN_LOCATION);
6493 gsi = gsi_last_bb (entry_bb);
6496 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6497 t = fold_convert (itype, t);
6498 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6499 true, GSI_SAME_STMT);
6501 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6502 t = fold_convert (itype, t);
6503 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6504 true, GSI_SAME_STMT);
6506 n1 = fd->loop.n1;
6507 n2 = fd->loop.n2;
6508 step = fd->loop.step;
6509 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6511 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6512 OMP_CLAUSE__LOOPTEMP_);
6513 gcc_assert (innerc);
6514 n1 = OMP_CLAUSE_DECL (innerc);
6515 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6516 OMP_CLAUSE__LOOPTEMP_);
6517 gcc_assert (innerc);
6518 n2 = OMP_CLAUSE_DECL (innerc);
6520 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6521 true, NULL_TREE, true, GSI_SAME_STMT);
6522 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6523 true, NULL_TREE, true, GSI_SAME_STMT);
6524 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6525 true, NULL_TREE, true, GSI_SAME_STMT);
6526 fd->chunk_size
6527 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
6528 true, NULL_TREE, true, GSI_SAME_STMT);
6530 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6531 t = fold_build2 (PLUS_EXPR, itype, step, t);
6532 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6533 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6534 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6535 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6536 fold_build1 (NEGATE_EXPR, itype, t),
6537 fold_build1 (NEGATE_EXPR, itype, step));
6538 else
6539 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6540 t = fold_convert (itype, t);
6541 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6542 true, GSI_SAME_STMT);
6544 trip_var = create_tmp_reg (itype, ".trip");
6545 if (gimple_in_ssa_p (cfun))
6547 trip_init = make_ssa_name (trip_var, NULL);
6548 trip_main = make_ssa_name (trip_var, NULL);
6549 trip_back = make_ssa_name (trip_var, NULL);
6551 else
6553 trip_init = trip_var;
6554 trip_main = trip_var;
6555 trip_back = trip_var;
6558 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
6559 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6561 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6562 t = fold_build2 (MULT_EXPR, itype, t, step);
6563 if (POINTER_TYPE_P (type))
6564 t = fold_build_pointer_plus (n1, t);
6565 else
6566 t = fold_build2 (PLUS_EXPR, type, t, n1);
6567 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6568 true, GSI_SAME_STMT);
6570 /* Remove the GIMPLE_OMP_FOR. */
6571 gsi_remove (&gsi, true);
6573 /* Iteration space partitioning goes in ITER_PART_BB. */
6574 gsi = gsi_last_bb (iter_part_bb);
6576 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6577 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6578 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6579 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6580 false, GSI_CONTINUE_LINKING);
6582 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6583 t = fold_build2 (MIN_EXPR, itype, t, n);
6584 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6585 false, GSI_CONTINUE_LINKING);
6587 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6588 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6590 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6591 gsi = gsi_start_bb (seq_start_bb);
6593 tree startvar = fd->loop.v;
6594 tree endvar = NULL_TREE;
6596 if (gimple_omp_for_combined_p (fd->for_stmt))
6598 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6599 ? gimple_omp_parallel_clauses (inner_stmt)
6600 : gimple_omp_for_clauses (inner_stmt);
6601 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6602 gcc_assert (innerc);
6603 startvar = OMP_CLAUSE_DECL (innerc);
6604 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6605 OMP_CLAUSE__LOOPTEMP_);
6606 gcc_assert (innerc);
6607 endvar = OMP_CLAUSE_DECL (innerc);
6610 t = fold_convert (itype, s0);
6611 t = fold_build2 (MULT_EXPR, itype, t, step);
6612 if (POINTER_TYPE_P (type))
6613 t = fold_build_pointer_plus (n1, t);
6614 else
6615 t = fold_build2 (PLUS_EXPR, type, t, n1);
6616 t = fold_convert (TREE_TYPE (startvar), t);
6617 t = force_gimple_operand_gsi (&gsi, t,
6618 DECL_P (startvar)
6619 && TREE_ADDRESSABLE (startvar),
6620 NULL_TREE, false, GSI_CONTINUE_LINKING);
6621 stmt = gimple_build_assign (startvar, t);
6622 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6624 t = fold_convert (itype, e0);
6625 t = fold_build2 (MULT_EXPR, itype, t, step);
6626 if (POINTER_TYPE_P (type))
6627 t = fold_build_pointer_plus (n1, t);
6628 else
6629 t = fold_build2 (PLUS_EXPR, type, t, n1);
6630 t = fold_convert (TREE_TYPE (startvar), t);
6631 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6632 false, GSI_CONTINUE_LINKING);
6633 if (endvar)
6635 stmt = gimple_build_assign (endvar, e);
6636 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6637 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6638 stmt = gimple_build_assign (fd->loop.v, e);
6639 else
6640 stmt = gimple_build_assign_with_ops (NOP_EXPR, fd->loop.v, e,
6641 NULL_TREE);
6642 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6644 if (fd->collapse > 1)
6645 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6647 if (!broken_loop)
6649 /* The code controlling the sequential loop goes in CONT_BB,
6650 replacing the GIMPLE_OMP_CONTINUE. */
6651 gsi = gsi_last_bb (cont_bb);
6652 stmt = gsi_stmt (gsi);
6653 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6654 vmain = gimple_omp_continue_control_use (stmt);
6655 vback = gimple_omp_continue_control_def (stmt);
6657 if (!gimple_omp_for_combined_p (fd->for_stmt))
6659 if (POINTER_TYPE_P (type))
6660 t = fold_build_pointer_plus (vmain, step);
6661 else
6662 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6663 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
6664 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6665 true, GSI_SAME_STMT);
6666 stmt = gimple_build_assign (vback, t);
6667 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6669 t = build2 (fd->loop.cond_code, boolean_type_node,
6670 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6671 ? t : vback, e);
6672 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6675 /* Remove GIMPLE_OMP_CONTINUE. */
6676 gsi_remove (&gsi, true);
6678 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6679 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6681 /* Trip update code goes into TRIP_UPDATE_BB. */
6682 gsi = gsi_start_bb (trip_update_bb);
6684 t = build_int_cst (itype, 1);
6685 t = build2 (PLUS_EXPR, itype, trip_main, t);
6686 stmt = gimple_build_assign (trip_back, t);
6687 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6690 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6691 gsi = gsi_last_bb (exit_bb);
6692 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6694 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6695 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6697 gsi_remove (&gsi, true);
6699 /* Connect the new blocks. */
6700 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6701 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6703 if (!broken_loop)
6705 se = find_edge (cont_bb, body_bb);
6706 if (gimple_omp_for_combined_p (fd->for_stmt))
6708 remove_edge (se);
6709 se = NULL;
6711 else if (fd->collapse > 1)
6713 remove_edge (se);
6714 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6716 else
6717 se->flags = EDGE_TRUE_VALUE;
6718 find_edge (cont_bb, trip_update_bb)->flags
6719 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6721 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6724 if (gimple_in_ssa_p (cfun))
6726 gimple_stmt_iterator psi;
6727 gimple phi;
6728 edge re, ene;
6729 edge_var_map *vm;
6730 size_t i;
6732 gcc_assert (fd->collapse == 1 && !broken_loop);
6734 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6735 remove arguments of the phi nodes in fin_bb. We need to create
6736 appropriate phi nodes in iter_part_bb instead. */
6737 se = single_pred_edge (fin_bb);
6738 re = single_succ_edge (trip_update_bb);
6739 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
6740 ene = single_succ_edge (entry_bb);
6742 psi = gsi_start_phis (fin_bb);
6743 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6744 gsi_next (&psi), ++i)
6746 gimple nphi;
6747 source_location locus;
6749 phi = gsi_stmt (psi);
6750 t = gimple_phi_result (phi);
6751 gcc_assert (t == redirect_edge_var_map_result (vm));
6752 nphi = create_phi_node (t, iter_part_bb);
6754 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6755 locus = gimple_phi_arg_location_from_edge (phi, se);
6757 /* A special case -- fd->loop.v is not yet computed in
6758 iter_part_bb; we need to use vextra instead. */
6759 if (t == fd->loop.v)
6760 t = vextra;
6761 add_phi_arg (nphi, t, ene, locus);
6762 locus = redirect_edge_var_map_location (vm);
6763 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6765 gcc_assert (gsi_end_p (psi) && i == head->length ());
6766 redirect_edge_var_map_clear (re);
6767 while (1)
6769 psi = gsi_start_phis (fin_bb);
6770 if (gsi_end_p (psi))
6771 break;
6772 remove_phi_node (&psi, false);
6775 /* Make phi node for trip. */
6776 phi = create_phi_node (trip_main, iter_part_bb);
6777 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6778 UNKNOWN_LOCATION);
6779 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6780 UNKNOWN_LOCATION);
6783 if (!broken_loop)
6784 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6785 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6786 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6787 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6788 recompute_dominator (CDI_DOMINATORS, fin_bb));
6789 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6790 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6791 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6792 recompute_dominator (CDI_DOMINATORS, body_bb));
6794 if (!broken_loop)
6796 struct loop *trip_loop = alloc_loop ();
6797 trip_loop->header = iter_part_bb;
6798 trip_loop->latch = trip_update_bb;
6799 add_loop (trip_loop, iter_part_bb->loop_father);
6801 if (!gimple_omp_for_combined_p (fd->for_stmt))
6803 struct loop *loop = alloc_loop ();
6804 loop->header = body_bb;
6805 if (collapse_bb == NULL)
6806 loop->latch = cont_bb;
6807 add_loop (loop, trip_loop);
6812 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
6813 Given parameters:
6814 for (V = N1; V cond N2; V += STEP) BODY;
6816 where COND is "<" or ">" or "!=", we generate pseudocode
6818 for (ind_var = low; ind_var < high; ind_var++)
6819 {
6820 V = n1 + (ind_var * STEP)
6822 <BODY>
6823 }
6825 In the above pseudocode, low and high are function parameters of the
6826 child function. In the function below, we insert a temporary
6827 variable and calls to two OMP functions that will not be found in
6828 the body of _Cilk_for (since OMP_FOR cannot be mixed with
6829 _Cilk_for). These functions are replaced with low and high
6830 by the function that handles taskreg. */
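/* Illustrative sketch, not compiler code, and the exact libcilkrts
   calling convention is an assumption here: the child function the
   runtime invokes for each [low, high) slice has roughly this shape,
   following the pseudocode above:

     static void
     cilk_for_body (void *data, unsigned long low, unsigned long high)
     {
       for (unsigned long ind_var = low; ind_var < high; ind_var++)
         {
           V = N1 + ind_var * STEP;   -- recomputed per iteration
           <BODY>
         }
     }

   Because V is recomputed from the slice-local counter, the runtime
   needs to know neither N1 nor STEP; it only partitions [low, high).  */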
6833 static void
6834 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
6836 bool broken_loop = region->cont == NULL;
6837 basic_block entry_bb = region->entry;
6838 basic_block cont_bb = region->cont;
6840 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6841 gcc_assert (broken_loop
6842 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6843 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6844 basic_block l1_bb, l2_bb;
6846 if (!broken_loop)
6848 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6849 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6850 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6851 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6853 else
6855 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6856 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6857 l2_bb = single_succ (l1_bb);
6859 basic_block exit_bb = region->exit;
6860 basic_block l2_dom_bb = NULL;
6862 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
6864 /* The statements below, up to the "tree high_val = ..." line, are pseudo
6865 statements used to pass information to expand_omp_taskreg.
6866 low_val and high_val will be replaced by the __low and __high
6867 parameters from the child function.
6869 The call_exprs part is a place-holder; it is mainly used
6870 to identify distinctly to the top-level part that this is
6871 where we should put low and high (reasoning given in the header
6872 comment). */
6874 tree child_fndecl
6875 = gimple_omp_parallel_child_fn (last_stmt (region->outer->entry));
6876 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
6877 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
6879 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
6880 high_val = t;
6881 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
6882 low_val = t;
6884 gcc_assert (low_val && high_val);
6886 tree type = TREE_TYPE (low_val);
6887 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
6888 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6890 /* Not needed in SSA form right now. */
6891 gcc_assert (!gimple_in_ssa_p (cfun));
6892 if (l2_dom_bb == NULL)
6893 l2_dom_bb = l1_bb;
6895 tree n1 = low_val;
6896 tree n2 = high_val;
6898 gimple stmt = gimple_build_assign (ind_var, n1);
6900 /* Replace the GIMPLE_OMP_FOR statement. */
6901 gsi_replace (&gsi, stmt, true);
6903 if (!broken_loop)
6905 /* Code to control the increment goes in the CONT_BB. */
6906 gsi = gsi_last_bb (cont_bb);
6907 stmt = gsi_stmt (gsi);
6908 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6909 stmt = gimple_build_assign_with_ops (PLUS_EXPR, ind_var, ind_var,
6910 build_one_cst (type));
6912 /* Replace GIMPLE_OMP_CONTINUE. */
6913 gsi_replace (&gsi, stmt, true);
6916 /* Emit the condition in L1_BB. */
6917 gsi = gsi_after_labels (l1_bb);
6918 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
6919 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
6920 fd->loop.step);
6921 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
6922 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6923 fd->loop.n1, fold_convert (sizetype, t));
6924 else
6925 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
6926 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
6927 t = fold_convert (TREE_TYPE (fd->loop.v), t);
6928 expand_omp_build_assign (&gsi, fd->loop.v, t);
6930 /* The condition is always '<' since the runtime will fill in the low
6931 and high values. */
6932 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
6933 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
6935 /* Remove GIMPLE_OMP_RETURN. */
6936 gsi = gsi_last_bb (exit_bb);
6937 gsi_remove (&gsi, true);
6939 /* Connect the new blocks. */
6940 remove_edge (FALLTHRU_EDGE (entry_bb));
6942 edge e, ne;
6943 if (!broken_loop)
6945 remove_edge (BRANCH_EDGE (entry_bb));
6946 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6948 e = BRANCH_EDGE (l1_bb);
6949 ne = FALLTHRU_EDGE (l1_bb);
6950 e->flags = EDGE_TRUE_VALUE;
6952 else
6954 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6956 ne = single_succ_edge (l1_bb);
6957 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6960 ne->flags = EDGE_FALSE_VALUE;
6961 e->probability = REG_BR_PROB_BASE * 7 / 8;
6962 ne->probability = REG_BR_PROB_BASE / 8;
6964 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6965 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6966 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6968 if (!broken_loop)
6970 struct loop *loop = alloc_loop ();
6971 loop->header = l1_bb;
6972 loop->latch = cont_bb;
6973 add_loop (loop, l1_bb->loop_father);
6974 loop->safelen = INT_MAX;
6977 /* Pick the correct library function based on the precision of the
6978 induction variable type. */
6979 tree lib_fun = NULL_TREE;
6980 if (TYPE_PRECISION (type) == 32)
6981 lib_fun = cilk_for_32_fndecl;
6982 else if (TYPE_PRECISION (type) == 64)
6983 lib_fun = cilk_for_64_fndecl;
6984 else
6985 gcc_unreachable ();
6987 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
6989 /* WS_ARGS contains the library function flavor to call
6990 (__libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32), and the
6991 user-defined grain value. If the user does not define one, then zero
6992 is passed in by the parser. */
6993 vec_alloc (region->ws_args, 2);
6994 region->ws_args->quick_push (lib_fun);
6995 region->ws_args->quick_push (fd->chunk_size);
6998 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
6999 loop. Given parameters:
7001 for (V = N1; V cond N2; V += STEP) BODY;
7003 where COND is "<" or ">", we generate pseudocode
7005 V = N1;
7006 goto L1;
7007 L0:
7008 BODY;
7009 V += STEP;
7010 L1:
7011 if (V cond N2) goto L0; else goto L2;
7012 L2:
7014 For collapsed loops, given parameters:
7015 collapse(3)
7016 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
7017 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
7018 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
7019 BODY;
7021 we generate pseudocode
7023 if (cond3 is <)
7024 adj = STEP3 - 1;
7025 else
7026 adj = STEP3 + 1;
7027 count3 = (adj + N32 - N31) / STEP3;
7028 if (cond2 is <)
7029 adj = STEP2 - 1;
7030 else
7031 adj = STEP2 + 1;
7032 count2 = (adj + N22 - N21) / STEP2;
7033 if (cond1 is <)
7034 adj = STEP1 - 1;
7035 else
7036 adj = STEP1 + 1;
7037 count1 = (adj + N12 - N11) / STEP1;
7038 count = count1 * count2 * count3;
7039 V = 0;
7040 V1 = N11;
7041 V2 = N21;
7042 V3 = N31;
7043 goto L1;
7044 L0:
7045 BODY;
7046 V += 1;
7047 V3 += STEP3;
7048 V2 += (V3 cond3 N32) ? 0 : STEP2;
7049 V3 = (V3 cond3 N32) ? V3 : N31;
7050 V1 += (V2 cond2 N22) ? 0 : STEP1;
7051 V2 = (V2 cond2 N22) ? V2 : N21;
7052 L1:
7053 if (V < count) goto L0; else goto L2;
7054 L2:
7056 */
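/* Illustrative example, not compiler code: with collapse(2) and
   count2 == 3 the cascade above behaves like a mixed-radix counter.
   Starting from V1 = N11, V2 = N21, the values after each increment
   step are:

     V == 1:  V2 = N21 + STEP2        V1 = N11
     V == 2:  V2 = N21 + 2*STEP2      V1 = N11
     V == 3:  V2 wraps back to N21    V1 = N11 + STEP1

   i.e. only when the innermost IV steps out of its range does the
   next outer IV advance, and the inner IV is reset to its lower
   bound.  */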
7058 static void
7059 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
7061 tree type, t;
7062 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7063 gimple_stmt_iterator gsi;
7064 gimple stmt;
7065 bool broken_loop = region->cont == NULL;
7066 edge e, ne;
7067 tree *counts = NULL;
7068 int i;
7069 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7070 OMP_CLAUSE_SAFELEN);
7071 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7072 OMP_CLAUSE__SIMDUID_);
7073 tree n1, n2;
7075 type = TREE_TYPE (fd->loop.v);
7076 entry_bb = region->entry;
7077 cont_bb = region->cont;
7078 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7079 gcc_assert (broken_loop
7080 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7081 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7082 if (!broken_loop)
7084 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7085 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7086 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7087 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7089 else
7091 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7092 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7093 l2_bb = single_succ (l1_bb);
7095 exit_bb = region->exit;
7096 l2_dom_bb = NULL;
7098 gsi = gsi_last_bb (entry_bb);
7100 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7101 /* Not needed in SSA form right now. */
7102 gcc_assert (!gimple_in_ssa_p (cfun));
7103 if (fd->collapse > 1)
7105 int first_zero_iter = -1;
7106 basic_block zero_iter_bb = l2_bb;
7108 counts = XALLOCAVEC (tree, fd->collapse);
7109 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7110 zero_iter_bb, first_zero_iter,
7111 l2_dom_bb);
7113 if (l2_dom_bb == NULL)
7114 l2_dom_bb = l1_bb;
7116 n1 = fd->loop.n1;
7117 n2 = fd->loop.n2;
7118 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7120 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7121 OMP_CLAUSE__LOOPTEMP_);
7122 gcc_assert (innerc);
7123 n1 = OMP_CLAUSE_DECL (innerc);
7124 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7125 OMP_CLAUSE__LOOPTEMP_);
7126 gcc_assert (innerc);
7127 n2 = OMP_CLAUSE_DECL (innerc);
7128 expand_omp_build_assign (&gsi, fd->loop.v,
7129 fold_convert (type, n1));
7130 if (fd->collapse > 1)
7132 gsi_prev (&gsi);
7133 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7134 gsi_next (&gsi);
7137 else
7139 expand_omp_build_assign (&gsi, fd->loop.v,
7140 fold_convert (type, fd->loop.n1));
7141 if (fd->collapse > 1)
7142 for (i = 0; i < fd->collapse; i++)
7144 tree itype = TREE_TYPE (fd->loops[i].v);
7145 if (POINTER_TYPE_P (itype))
7146 itype = signed_type_for (itype);
7147 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7148 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7152 /* Remove the GIMPLE_OMP_FOR statement. */
7153 gsi_remove (&gsi, true);
7155 if (!broken_loop)
7157 /* Code to control the increment goes in the CONT_BB. */
7158 gsi = gsi_last_bb (cont_bb);
7159 stmt = gsi_stmt (gsi);
7160 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7162 if (POINTER_TYPE_P (type))
7163 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7164 else
7165 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7166 expand_omp_build_assign (&gsi, fd->loop.v, t);
7168 if (fd->collapse > 1)
7170 i = fd->collapse - 1;
7171 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7173 t = fold_convert (sizetype, fd->loops[i].step);
7174 t = fold_build_pointer_plus (fd->loops[i].v, t);
7176 else
7178 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7179 fd->loops[i].step);
7180 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7181 fd->loops[i].v, t);
7183 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7185 for (i = fd->collapse - 1; i > 0; i--)
7187 tree itype = TREE_TYPE (fd->loops[i].v);
7188 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7189 if (POINTER_TYPE_P (itype2))
7190 itype2 = signed_type_for (itype2);
7191 t = build3 (COND_EXPR, itype2,
7192 build2 (fd->loops[i].cond_code, boolean_type_node,
7193 fd->loops[i].v,
7194 fold_convert (itype, fd->loops[i].n2)),
7195 build_int_cst (itype2, 0),
7196 fold_convert (itype2, fd->loops[i - 1].step));
7197 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7198 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7199 else
7200 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7201 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7203 t = build3 (COND_EXPR, itype,
7204 build2 (fd->loops[i].cond_code, boolean_type_node,
7205 fd->loops[i].v,
7206 fold_convert (itype, fd->loops[i].n2)),
7207 fd->loops[i].v,
7208 fold_convert (itype, fd->loops[i].n1));
7209 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7213 /* Remove GIMPLE_OMP_CONTINUE. */
7214 gsi_remove (&gsi, true);
7217 /* Emit the condition in L1_BB. */
7218 gsi = gsi_start_bb (l1_bb);
7220 t = fold_convert (type, n2);
7221 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7222 false, GSI_CONTINUE_LINKING);
7223 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7224 stmt = gimple_build_cond_empty (t);
7225 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7226 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
7227 NULL, NULL)
7228 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
7229 NULL, NULL))
7231 gsi = gsi_for_stmt (stmt);
7232 gimple_regimplify_operands (stmt, &gsi);
7235 /* Remove GIMPLE_OMP_RETURN. */
7236 gsi = gsi_last_bb (exit_bb);
7237 gsi_remove (&gsi, true);
7239 /* Connect the new blocks. */
7240 remove_edge (FALLTHRU_EDGE (entry_bb));
7242 if (!broken_loop)
7244 remove_edge (BRANCH_EDGE (entry_bb));
7245 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7247 e = BRANCH_EDGE (l1_bb);
7248 ne = FALLTHRU_EDGE (l1_bb);
7249 e->flags = EDGE_TRUE_VALUE;
7251 else
7253 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7255 ne = single_succ_edge (l1_bb);
7256 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7259 ne->flags = EDGE_FALSE_VALUE;
7260 e->probability = REG_BR_PROB_BASE * 7 / 8;
7261 ne->probability = REG_BR_PROB_BASE / 8;
7263 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7264 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7265 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7267 if (!broken_loop)
7269 struct loop *loop = alloc_loop ();
7270 loop->header = l1_bb;
7271 loop->latch = cont_bb;
7272 add_loop (loop, l1_bb->loop_father);
7273 if (safelen == NULL_TREE)
7274 loop->safelen = INT_MAX;
7275 else
7277 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7278 if (TREE_CODE (safelen) != INTEGER_CST)
7279 loop->safelen = 0;
7280 else if (!tree_fits_uhwi_p (safelen)
7281 || tree_to_uhwi (safelen) > INT_MAX)
7282 loop->safelen = INT_MAX;
7283 else
7284 loop->safelen = tree_to_uhwi (safelen);
7285 if (loop->safelen == 1)
7286 loop->safelen = 0;
7288 if (simduid)
7290 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7291 cfun->has_simduid_loops = true;
7293 /* Unless -fno-tree-loop-vectorize was given, hint that we want to
7294 vectorize the loop. */
7295 if ((flag_tree_loop_vectorize
7296 || (!global_options_set.x_flag_tree_loop_vectorize
7297 && !global_options_set.x_flag_tree_vectorize))
7298 && flag_tree_loop_optimize
7299 && loop->safelen > 1)
7301 loop->force_vectorize = true;
7302 cfun->has_force_vectorize_loops = true;
7308 /* Expand the OpenMP loop defined by REGION. */
7310 static void
7311 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7313 struct omp_for_data fd;
7314 struct omp_for_data_loop *loops;
7316 loops
7317 = (struct omp_for_data_loop *)
7318 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7319 * sizeof (struct omp_for_data_loop));
7320 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
7321 region->sched_kind = fd.sched_kind;
7323 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7324 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7325 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7326 if (region->cont)
7328 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7329 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7330 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7332 else
7333 /* If there isn't a continue, then this is a degenerate case where
7334 the introduction of abnormal edges during lowering will prevent
7335 original loops from being detected. Fix that up. */
7336 loops_state_set (LOOPS_NEED_FIXUP);
7338 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7339 expand_omp_simd (region, &fd);
7340 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7341 expand_cilk_for (region, &fd);
7342 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7343 && !fd.have_ordered)
7345 if (fd.chunk_size == NULL)
7346 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7347 else
7348 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7350 else
7352 int fn_index, start_ix, next_ix;
7354 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7355 == GF_OMP_FOR_KIND_FOR);
7356 if (fd.chunk_size == NULL
7357 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7358 fd.chunk_size = integer_zero_node;
7359 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
7360 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7361 ? 3 : fd.sched_kind;
7362 fn_index += fd.have_ordered * 4;
7363 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7364 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
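/* For example, assuming the GOMP_LOOP_*_START builtins are declared
   in the order static, dynamic, guided, runtime followed by their
   ordered variants: schedule(guided) yields fn_index 2 and selects
   BUILT_IN_GOMP_LOOP_GUIDED_START, while schedule(guided) combined
   with ordered yields fn_index 6 and
   BUILT_IN_GOMP_LOOP_ORDERED_GUIDED_START.  */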
7365 if (fd.iter_type == long_long_unsigned_type_node)
7367 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7368 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7369 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7370 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7372 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7373 (enum built_in_function) next_ix, inner_stmt);
7376 if (gimple_in_ssa_p (cfun))
7377 update_ssa (TODO_update_ssa_only_virtuals);
7381 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7383 v = GOMP_sections_start (n);
7384 L0:
7385 switch (v)
7386 {
7387 case 0:
7388 goto L2;
7389 case 1:
7390 section 1;
7391 goto L1;
7392 case 2:
7393 ...
7394 case n:
7395 ...
7396 default:
7397 abort ();
7398 }
7399 L1:
7400 v = GOMP_sections_next ();
7401 goto L0;
7402 L2:
7403 reduction;
7405 If this is a combined parallel sections region, replace the call to
7406 GOMP_sections_start with a call to GOMP_sections_next. */
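/* Illustrative example, not compiler code: a two-section construct

     #pragma omp sections
     {
       #pragma omp section
       foo ();
       #pragma omp section
       bar ();
     }

   becomes a dispatch loop in which each thread repeatedly draws V
   from GOMP_sections_next () and executes case 1 (foo) or case 2
   (bar) until it draws 0 and leaves through L2.  */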
7408 static void
7409 expand_omp_sections (struct omp_region *region)
7411 tree t, u, vin = NULL, vmain, vnext, l2;
7412 unsigned len;
7413 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7414 gimple_stmt_iterator si, switch_si;
7415 gimple sections_stmt, stmt, cont;
7416 edge_iterator ei;
7417 edge e;
7418 struct omp_region *inner;
7419 unsigned i, casei;
7420 bool exit_reachable = region->cont != NULL;
7422 gcc_assert (region->exit != NULL);
7423 entry_bb = region->entry;
7424 l0_bb = single_succ (entry_bb);
7425 l1_bb = region->cont;
7426 l2_bb = region->exit;
7427 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7428 l2 = gimple_block_label (l2_bb);
7429 else
7431 /* This can happen if there are reductions. */
7432 len = EDGE_COUNT (l0_bb->succs);
7433 gcc_assert (len > 0);
7434 e = EDGE_SUCC (l0_bb, len - 1);
7435 si = gsi_last_bb (e->dest);
7436 l2 = NULL_TREE;
7437 if (gsi_end_p (si)
7438 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7439 l2 = gimple_block_label (e->dest);
7440 else
7441 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7443 si = gsi_last_bb (e->dest);
7444 if (gsi_end_p (si)
7445 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7447 l2 = gimple_block_label (e->dest);
7448 break;
7452 if (exit_reachable)
7453 default_bb = create_empty_bb (l1_bb->prev_bb);
7454 else
7455 default_bb = create_empty_bb (l0_bb);
7457 /* We will build a switch() with enough cases for all the
7458 GIMPLE_OMP_SECTION regions, a '0' case taken when no more work
7459 remains, and a default case to abort if something goes wrong. */
7460 len = EDGE_COUNT (l0_bb->succs);
7462 /* Use vec::quick_push on label_vec throughout, since we know the size
7463 in advance. */
7464 auto_vec<tree> label_vec (len);
7466 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
7467 GIMPLE_OMP_SECTIONS statement. */
7468 si = gsi_last_bb (entry_bb);
7469 sections_stmt = gsi_stmt (si);
7470 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
7471 vin = gimple_omp_sections_control (sections_stmt);
7472 if (!is_combined_parallel (region))
7474 /* If we are not inside a combined parallel+sections region,
7475 call GOMP_sections_start. */
7476 t = build_int_cst (unsigned_type_node, len - 1);
7477 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
7478 stmt = gimple_build_call (u, 1, t);
7480 else
7482 /* Otherwise, call GOMP_sections_next. */
7483 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7484 stmt = gimple_build_call (u, 0);
7486 gimple_call_set_lhs (stmt, vin);
7487 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7488 gsi_remove (&si, true);
7490 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
7491 L0_BB. */
7492 switch_si = gsi_last_bb (l0_bb);
7493 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
7494 if (exit_reachable)
7496 cont = last_stmt (l1_bb);
7497 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
7498 vmain = gimple_omp_continue_control_use (cont);
7499 vnext = gimple_omp_continue_control_def (cont);
7501 else
7503 vmain = vin;
7504 vnext = NULL_TREE;
7507 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
7508 label_vec.quick_push (t);
7509 i = 1;
7511 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7512 for (inner = region->inner, casei = 1;
7513 inner;
7514 inner = inner->next, i++, casei++)
7516 basic_block s_entry_bb, s_exit_bb;
7518 /* Skip optional reduction region. */
7519 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7521 --i;
7522 --casei;
7523 continue;
7526 s_entry_bb = inner->entry;
7527 s_exit_bb = inner->exit;
7529 t = gimple_block_label (s_entry_bb);
7530 u = build_int_cst (unsigned_type_node, casei);
7531 u = build_case_label (u, NULL, t);
7532 label_vec.quick_push (u);
7534 si = gsi_last_bb (s_entry_bb);
7535 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7536 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7537 gsi_remove (&si, true);
7538 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7540 if (s_exit_bb == NULL)
7541 continue;
7543 si = gsi_last_bb (s_exit_bb);
7544 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7545 gsi_remove (&si, true);
7547 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7550 /* Error handling code goes in DEFAULT_BB. */
7551 t = gimple_block_label (default_bb);
7552 u = build_case_label (NULL, NULL, t);
7553 make_edge (l0_bb, default_bb, 0);
7554 add_bb_to_loop (default_bb, current_loops->tree_root);
7556 stmt = gimple_build_switch (vmain, u, label_vec);
7557 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7558 gsi_remove (&switch_si, true);
7560 si = gsi_start_bb (default_bb);
7561 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7562 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7564 if (exit_reachable)
7566 tree bfn_decl;
7568 /* Code to get the next section goes in L1_BB. */
7569 si = gsi_last_bb (l1_bb);
7570 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7572 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7573 stmt = gimple_build_call (bfn_decl, 0);
7574 gimple_call_set_lhs (stmt, vnext);
7575 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7576 gsi_remove (&si, true);
7578 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7581 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7582 si = gsi_last_bb (l2_bb);
7583 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7584 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7585 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7586 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7587 else
7588 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7589 stmt = gimple_build_call (t, 0);
7590 if (gimple_omp_return_lhs (gsi_stmt (si)))
7591 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7592 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7593 gsi_remove (&si, true);
7595 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7599 /* Expand code for an OpenMP single directive.  We've already expanded
7600    much of the code during lowering; here we simply place the GOMP_barrier call.  */
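/* For example (a sketch): by this point

	#pragma omp single
	  x = f ();

   has already been rewritten by lower_omp_single into a body guarded by
   GOMP_single_start (), so only the entry/exit markers and the implicit
   barrier (omitted for 'nowait') remain to be dealt with here.  */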
7602 static void
7603 expand_omp_single (struct omp_region *region)
7605 basic_block entry_bb, exit_bb;
7606 gimple_stmt_iterator si;
7608 entry_bb = region->entry;
7609 exit_bb = region->exit;
7611 si = gsi_last_bb (entry_bb);
7612 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7613 gsi_remove (&si, true);
7614 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7616 si = gsi_last_bb (exit_bb);
7617 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7619 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7620 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7622 gsi_remove (&si, true);
7623 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7627 /* Generic expansion for OpenMP synchronization directives: single,
7628    master, taskgroup, ordered, critical and teams.  All we need to do
7629    here is remove the entry and exit markers for REGION.  */
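/* For instance (a sketch), for

	#pragma omp critical
	  count++;

   the GOMP_critical_start/GOMP_critical_end calls were already emitted
   during lowering (see lower_omp_critical below); only the
   GIMPLE_OMP_CRITICAL and GIMPLE_OMP_RETURN markers delimiting the
   region remain to be removed here.  */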
7631 static void
7632 expand_omp_synch (struct omp_region *region)
7634 basic_block entry_bb, exit_bb;
7635 gimple_stmt_iterator si;
7637 entry_bb = region->entry;
7638 exit_bb = region->exit;
7640 si = gsi_last_bb (entry_bb);
7641 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7642 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7643 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7644 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7645 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7646 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7647 gsi_remove (&si, true);
7648 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7650 if (exit_bb)
7652 si = gsi_last_bb (exit_bb);
7653 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7654 gsi_remove (&si, true);
7655 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7659 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7660 operation as a normal volatile load. */
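/* For illustration (a sketch), with a 4-byte int (INDEX == 2)

	#pragma omp atomic read
	  v = x;

   becomes roughly

	v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   with MEMMODEL_SEQ_CST instead when the seq_cst clause was present.  */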
7662 static bool
7663 expand_omp_atomic_load (basic_block load_bb, tree addr,
7664 tree loaded_val, int index)
7666 enum built_in_function tmpbase;
7667 gimple_stmt_iterator gsi;
7668 basic_block store_bb;
7669 location_t loc;
7670 gimple stmt;
7671 tree decl, call, type, itype;
7673 gsi = gsi_last_bb (load_bb);
7674 stmt = gsi_stmt (gsi);
7675 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7676 loc = gimple_location (stmt);
7678 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7679 is smaller than word size, then expand_atomic_load assumes that the load
7680 is atomic. We could avoid the builtin entirely in this case. */
7682 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7683 decl = builtin_decl_explicit (tmpbase);
7684 if (decl == NULL_TREE)
7685 return false;
7687 type = TREE_TYPE (loaded_val);
7688 itype = TREE_TYPE (TREE_TYPE (decl));
7690 call = build_call_expr_loc (loc, decl, 2, addr,
7691 build_int_cst (NULL,
7692 gimple_omp_atomic_seq_cst_p (stmt)
7693 ? MEMMODEL_SEQ_CST
7694 : MEMMODEL_RELAXED));
7695 if (!useless_type_conversion_p (type, itype))
7696 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7697 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7699 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7700 gsi_remove (&gsi, true);
7702 store_bb = single_succ (load_bb);
7703 gsi = gsi_last_bb (store_bb);
7704 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7705 gsi_remove (&gsi, true);
7707 if (gimple_in_ssa_p (cfun))
7708 update_ssa (TODO_update_ssa_no_phi);
7710 return true;
7713 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7714 operation as a normal volatile store. */
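/* For illustration (a sketch), '#pragma omp atomic write' of a 4-byte
   int, 'x = expr;', becomes roughly

	__atomic_store_4 (&x, expr, MEMMODEL_RELAXED);

   and, when the old value is also needed, the same slot is handled as
   an exchange via __atomic_exchange_4 instead.  */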
7716 static bool
7717 expand_omp_atomic_store (basic_block load_bb, tree addr,
7718 tree loaded_val, tree stored_val, int index)
7720 enum built_in_function tmpbase;
7721 gimple_stmt_iterator gsi;
7722 basic_block store_bb = single_succ (load_bb);
7723 location_t loc;
7724 gimple stmt;
7725 tree decl, call, type, itype;
7726 enum machine_mode imode;
7727 bool exchange;
7729 gsi = gsi_last_bb (load_bb);
7730 stmt = gsi_stmt (gsi);
7731 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7733 /* If the load value is needed, then this isn't a store but an exchange. */
7734 exchange = gimple_omp_atomic_need_value_p (stmt);
7736 gsi = gsi_last_bb (store_bb);
7737 stmt = gsi_stmt (gsi);
7738 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7739 loc = gimple_location (stmt);
7741 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7742 is smaller than word size, then expand_atomic_store assumes that the store
7743 is atomic. We could avoid the builtin entirely in this case. */
7745 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7746 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7747 decl = builtin_decl_explicit (tmpbase);
7748 if (decl == NULL_TREE)
7749 return false;
7751 type = TREE_TYPE (stored_val);
7753 /* Dig out the type of the function's second argument. */
7754 itype = TREE_TYPE (decl);
7755 itype = TYPE_ARG_TYPES (itype);
7756 itype = TREE_CHAIN (itype);
7757 itype = TREE_VALUE (itype);
7758 imode = TYPE_MODE (itype);
7760 if (exchange && !can_atomic_exchange_p (imode, true))
7761 return false;
7763 if (!useless_type_conversion_p (itype, type))
7764 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7765 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7766 build_int_cst (NULL,
7767 gimple_omp_atomic_seq_cst_p (stmt)
7768 ? MEMMODEL_SEQ_CST
7769 : MEMMODEL_RELAXED));
7770 if (exchange)
7772 if (!useless_type_conversion_p (type, itype))
7773 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7774 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7777 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7778 gsi_remove (&gsi, true);
7780 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7781 gsi = gsi_last_bb (load_bb);
7782 gsi_remove (&gsi, true);
7784 if (gimple_in_ssa_p (cfun))
7785 update_ssa (TODO_update_ssa_no_phi);
7787 return true;
7790 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7791 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7792 size of the data type, and thus usable to find the index of the builtin
7793 decl. Returns false if the expression is not of the proper form. */
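/* For example (a sketch), with a 4-byte int

	#pragma omp atomic
	  x += 3;

   matches the PLUS_EXPR case below and becomes

	__atomic_fetch_add_4 (&x, 3, MEMMODEL_RELAXED);

   a capture of the updated value would select __atomic_add_fetch_4
   instead (NEED_NEW), while a capture of the previous value keeps the
   fetch-op form (NEED_OLD).  */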
7795 static bool
7796 expand_omp_atomic_fetch_op (basic_block load_bb,
7797 tree addr, tree loaded_val,
7798 tree stored_val, int index)
7800 enum built_in_function oldbase, newbase, tmpbase;
7801 tree decl, itype, call;
7802 tree lhs, rhs;
7803 basic_block store_bb = single_succ (load_bb);
7804 gimple_stmt_iterator gsi;
7805 gimple stmt;
7806 location_t loc;
7807 enum tree_code code;
7808 bool need_old, need_new;
7809 enum machine_mode imode;
7810 bool seq_cst;
7812 /* We expect to find the following sequences:
7814 load_bb:
7815 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7817 store_bb:
7818 val = tmp OP something; (or: something OP tmp)
7819 	 GIMPLE_OMP_ATOMIC_STORE (val)
7821 ???FIXME: Allow a more flexible sequence.
7822 Perhaps use data flow to pick the statements.
7826 gsi = gsi_after_labels (store_bb);
7827 stmt = gsi_stmt (gsi);
7828 loc = gimple_location (stmt);
7829 if (!is_gimple_assign (stmt))
7830 return false;
7831 gsi_next (&gsi);
7832 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7833 return false;
7834 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7835 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7836 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7837 gcc_checking_assert (!need_old || !need_new);
7839 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7840 return false;
7842 /* Check for one of the supported fetch-op operations. */
7843 code = gimple_assign_rhs_code (stmt);
7844 switch (code)
7846 case PLUS_EXPR:
7847 case POINTER_PLUS_EXPR:
7848 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7849 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7850 break;
7851 case MINUS_EXPR:
7852 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7853 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7854 break;
7855 case BIT_AND_EXPR:
7856 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7857 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7858 break;
7859 case BIT_IOR_EXPR:
7860 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7861 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7862 break;
7863 case BIT_XOR_EXPR:
7864 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7865 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7866 break;
7867 default:
7868 return false;
7871 /* Make sure the expression is of the proper form. */
7872 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7873 rhs = gimple_assign_rhs2 (stmt);
7874 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7875 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7876 rhs = gimple_assign_rhs1 (stmt);
7877 else
7878 return false;
7880 tmpbase = ((enum built_in_function)
7881 ((need_new ? newbase : oldbase) + index + 1));
7882 decl = builtin_decl_explicit (tmpbase);
7883 if (decl == NULL_TREE)
7884 return false;
7885 itype = TREE_TYPE (TREE_TYPE (decl));
7886 imode = TYPE_MODE (itype);
7888 /* We could test all of the various optabs involved, but the fact of the
7889 matter is that (with the exception of i486 vs i586 and xadd) all targets
7890      that support any atomic operation optab also implement compare-and-swap.
7891 Let optabs.c take care of expanding any compare-and-swap loop. */
7892 if (!can_compare_and_swap_p (imode, true))
7893 return false;
7895 gsi = gsi_last_bb (load_bb);
7896 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7898 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7899 It only requires that the operation happen atomically. Thus we can
7900      use the RELAXED memory model, unless the seq_cst clause was given.  */
7901 call = build_call_expr_loc (loc, decl, 3, addr,
7902 fold_convert_loc (loc, itype, rhs),
7903 build_int_cst (NULL,
7904 seq_cst ? MEMMODEL_SEQ_CST
7905 : MEMMODEL_RELAXED));
7907 if (need_old || need_new)
7909 lhs = need_old ? loaded_val : stored_val;
7910 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7911 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7913 else
7914 call = fold_convert_loc (loc, void_type_node, call);
7915 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7916 gsi_remove (&gsi, true);
7918 gsi = gsi_last_bb (store_bb);
7919 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7920 gsi_remove (&gsi, true);
7921 gsi = gsi_last_bb (store_bb);
7922 gsi_remove (&gsi, true);
7924 if (gimple_in_ssa_p (cfun))
7925 update_ssa (TODO_update_ssa_no_phi);
7927 return true;
7930 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7932       oldval = *addr;
7933     repeat:
7934       newval = rhs;	// with oldval replacing *addr in rhs
7935       curval = __sync_val_compare_and_swap (addr, oldval, newval);
7936       if (curval != oldval)
7937 	{ oldval = curval; goto repeat; }
7939 INDEX is log2 of the size of the data type, and thus usable to find the
7940 index of the builtin decl. */
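/* For illustration (a sketch), 'float f; ... #pragma omp atomic f += 1.0f;'
   on a target without a float fetch-add runs the loop below on an
   integer image of the value:

	itmp = VIEW_CONVERT (int, f);			// initial load
     repeat:
	newi = VIEW_CONVERT (int, VIEW_CONVERT (float, itmp) + 1.0f);
	cur = __sync_val_compare_and_swap_4 (iaddr, itmp, newi);
	if (cur != itmp)
	  { itmp = cur; goto repeat; }

   where IADDR is the same address reinterpreted as 'int *'; comparing
   the integer images is what keeps the NaN and -0.0 cases terminating.  */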
7942 static bool
7943 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7944 tree addr, tree loaded_val, tree stored_val,
7945 int index)
7947 tree loadedi, storedi, initial, new_storedi, old_vali;
7948 tree type, itype, cmpxchg, iaddr;
7949 gimple_stmt_iterator si;
7950 basic_block loop_header = single_succ (load_bb);
7951 gimple phi, stmt;
7952 edge e;
7953 enum built_in_function fncode;
7955 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7956 order to use the RELAXED memory model effectively. */
7957 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7958 + index + 1);
7959 cmpxchg = builtin_decl_explicit (fncode);
7960 if (cmpxchg == NULL_TREE)
7961 return false;
7962 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7963 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7965 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7966 return false;
7968 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7969 si = gsi_last_bb (load_bb);
7970 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7972 /* For floating-point values, we'll need to view-convert them to integers
7973 so that we can perform the atomic compare and swap. Simplify the
7974 following code by always setting up the "i"ntegral variables. */
7975 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7977 tree iaddr_val;
7979 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7980 true), NULL);
7981 iaddr_val
7982 = force_gimple_operand_gsi (&si,
7983 fold_convert (TREE_TYPE (iaddr), addr),
7984 false, NULL_TREE, true, GSI_SAME_STMT);
7985 stmt = gimple_build_assign (iaddr, iaddr_val);
7986 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7987 loadedi = create_tmp_var (itype, NULL);
7988 if (gimple_in_ssa_p (cfun))
7989 loadedi = make_ssa_name (loadedi, NULL);
7991 else
7993 iaddr = addr;
7994 loadedi = loaded_val;
7997 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7998 tree loaddecl = builtin_decl_explicit (fncode);
7999 if (loaddecl)
8000 initial
8001 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
8002 build_call_expr (loaddecl, 2, iaddr,
8003 build_int_cst (NULL_TREE,
8004 MEMMODEL_RELAXED)));
8005 else
8006 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
8007 build_int_cst (TREE_TYPE (iaddr), 0));
8009 initial
8010 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
8011 GSI_SAME_STMT);
8013 /* Move the value to the LOADEDI temporary. */
8014 if (gimple_in_ssa_p (cfun))
8016 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
8017 phi = create_phi_node (loadedi, loop_header);
8018 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
8019 initial);
8021 else
8022 gsi_insert_before (&si,
8023 gimple_build_assign (loadedi, initial),
8024 GSI_SAME_STMT);
8025 if (loadedi != loaded_val)
8027 gimple_stmt_iterator gsi2;
8028 tree x;
8030 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
8031 gsi2 = gsi_start_bb (loop_header);
8032 if (gimple_in_ssa_p (cfun))
8034 gimple stmt;
8035 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8036 true, GSI_SAME_STMT);
8037 stmt = gimple_build_assign (loaded_val, x);
8038 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
8040 else
8042 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
8043 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8044 true, GSI_SAME_STMT);
8047 gsi_remove (&si, true);
8049 si = gsi_last_bb (store_bb);
8050 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8052 if (iaddr == addr)
8053 storedi = stored_val;
8054 else
8055 storedi =
8056 force_gimple_operand_gsi (&si,
8057 build1 (VIEW_CONVERT_EXPR, itype,
8058 stored_val), true, NULL_TREE, true,
8059 GSI_SAME_STMT);
8061 /* Build the compare&swap statement. */
8062 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
8063 new_storedi = force_gimple_operand_gsi (&si,
8064 fold_convert (TREE_TYPE (loadedi),
8065 new_storedi),
8066 true, NULL_TREE,
8067 true, GSI_SAME_STMT);
8069 if (gimple_in_ssa_p (cfun))
8070 old_vali = loadedi;
8071 else
8073 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
8074 stmt = gimple_build_assign (old_vali, loadedi);
8075 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8077 stmt = gimple_build_assign (loadedi, new_storedi);
8078 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8081 /* Note that we always perform the comparison as an integer, even for
8082 floating point. This allows the atomic operation to properly
8083 succeed even with NaNs and -0.0. */
8084 stmt = gimple_build_cond_empty
8085 (build2 (NE_EXPR, boolean_type_node,
8086 new_storedi, old_vali));
8087 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8089 /* Update cfg. */
8090 e = single_succ_edge (store_bb);
8091 e->flags &= ~EDGE_FALLTHRU;
8092 e->flags |= EDGE_FALSE_VALUE;
8094 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
8096 /* Copy the new value to loadedi (we already did that before the condition
8097 if we are not in SSA). */
8098 if (gimple_in_ssa_p (cfun))
8100 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
8101 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
8104 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8105 gsi_remove (&si, true);
8107 struct loop *loop = alloc_loop ();
8108 loop->header = loop_header;
8109 loop->latch = store_bb;
8110 add_loop (loop, loop_header->loop_father);
8112 if (gimple_in_ssa_p (cfun))
8113 update_ssa (TODO_update_ssa_no_phi);
8115 return true;
8118 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8120 GOMP_atomic_start ();
8121 *addr = rhs;
8122 GOMP_atomic_end ();
8124    The result is not globally atomic, but works so long as all parallel
8125    references are within #pragma omp atomic directives.  According to
8126    responses received from omp@openmp.org, this appears to be within spec.
8127    That makes sense, since it is how several other compilers handle this
8128    situation as well.
8129 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8130 expanding. STORED_VAL is the operand of the matching
8131 GIMPLE_OMP_ATOMIC_STORE.
8133 We replace
8134 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8135 loaded_val = *addr;
8137 and replace
8138 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8139 *addr = stored_val;
8142 static bool
8143 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
8144 tree addr, tree loaded_val, tree stored_val)
8146 gimple_stmt_iterator si;
8147 gimple stmt;
8148 tree t;
8150 si = gsi_last_bb (load_bb);
8151 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8153 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
8154 t = build_call_expr (t, 0);
8155 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8157 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
8158 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8159 gsi_remove (&si, true);
8161 si = gsi_last_bb (store_bb);
8162 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8164 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
8165 stored_val);
8166 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8168 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
8169 t = build_call_expr (t, 0);
8170 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8171 gsi_remove (&si, true);
8173 if (gimple_in_ssa_p (cfun))
8174 update_ssa (TODO_update_ssa_no_phi);
8175 return true;
8178 /* Expand a GIMPLE_OMP_ATOMIC statement.  We try the specialized forms
8179    in turn: a plain atomic load or store, expand_omp_atomic_fetch_op,
8180    then expand_omp_atomic_pipeline; if all of those fail, the
8181    ultimate fallback is wrapping the operation in a mutex
8182    (expand_omp_atomic_mutex).  REGION is the atomic region built
8183    by build_omp_regions_1 ().  */
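/* For instance (a sketch), a naturally aligned 4-byte int has
   TYPE_SIZE_UNIT 4, hence INDEX == 2 and the __atomic_*_4 entry points
   are eligible; the same type in a 1-byte-aligned packed field fails
   the alignment check below and drops straight to the mutex fallback.  */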
8185 static void
8186 expand_omp_atomic (struct omp_region *region)
8188 basic_block load_bb = region->entry, store_bb = region->exit;
8189 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
8190 tree loaded_val = gimple_omp_atomic_load_lhs (load);
8191 tree addr = gimple_omp_atomic_load_rhs (load);
8192 tree stored_val = gimple_omp_atomic_store_val (store);
8193 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8194 HOST_WIDE_INT index;
8196 /* Make sure the type is one of the supported sizes. */
8197 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
8198 index = exact_log2 (index);
8199 if (index >= 0 && index <= 4)
8201 unsigned int align = TYPE_ALIGN_UNIT (type);
8203 /* __sync builtins require strict data alignment. */
8204 if (exact_log2 (align) >= index)
8206 /* Atomic load. */
8207 if (loaded_val == stored_val
8208 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8209 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8210 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8211 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
8212 return;
8214 /* Atomic store. */
8215 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8216 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8217 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8218 && store_bb == single_succ (load_bb)
8219 && first_stmt (store_bb) == store
8220 && expand_omp_atomic_store (load_bb, addr, loaded_val,
8221 stored_val, index))
8222 return;
8224 /* When possible, use specialized atomic update functions. */
8225 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
8226 && store_bb == single_succ (load_bb)
8227 && expand_omp_atomic_fetch_op (load_bb, addr,
8228 loaded_val, stored_val, index))
8229 return;
8231 /* If we don't have specialized __sync builtins, try and implement
8232 as a compare and swap loop. */
8233 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
8234 loaded_val, stored_val, index))
8235 return;
8239 /* The ultimate fallback is wrapping the operation in a mutex. */
8240 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
8244 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
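/* For example, a minimal sketch (illustrative names, not taken from any
   testcase): '#pragma omp target map(tofrom: a)' around a statement body
   ends up as a call along the lines of

	GOMP_target (-1, child_fn, 0B, 1, &hostaddrs, &sizes, &kinds);

   where -1 lets the runtime pick the device, 0B stands in for the
   not-yet-available __OPENMP_TARGET__ symbol (see the FIXME below), and
   the three trailing arrays describe the single mapped object.  */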
8246 static void
8247 expand_omp_target (struct omp_region *region)
8249 basic_block entry_bb, exit_bb, new_bb;
8250 struct function *child_cfun = NULL;
8251 tree child_fn = NULL_TREE, block, t;
8252 gimple_stmt_iterator gsi;
8253 gimple entry_stmt, stmt;
8254 edge e;
8256 entry_stmt = last_stmt (region->entry);
8257 new_bb = region->entry;
8258 int kind = gimple_omp_target_kind (entry_stmt);
8259 if (kind == GF_OMP_TARGET_KIND_REGION)
8261 child_fn = gimple_omp_target_child_fn (entry_stmt);
8262 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
8265 entry_bb = region->entry;
8266 exit_bb = region->exit;
8268 if (kind == GF_OMP_TARGET_KIND_REGION)
8270 unsigned srcidx, dstidx, num;
8272 /* If the target region needs data sent from the parent
8273 function, then the very first statement (except possible
8274 	 tree profile counter updates) of the target region body
8275 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8276 &.OMP_DATA_O is passed as an argument to the child function,
8277 we need to replace it with the argument as seen by the child
8278 function.
8280 In most cases, this will end up being the identity assignment
8281 	 .OMP_DATA_I = .OMP_DATA_I.  However, if the target body had
8282 	 a function call that has been inlined, the original PARM_DECL
8283 	 .OMP_DATA_I may have been converted into a different local
8284 	 variable, in which case we need to keep the assignment.  */
8285 if (gimple_omp_target_data_arg (entry_stmt))
8287 basic_block entry_succ_bb = single_succ (entry_bb);
8288 gimple_stmt_iterator gsi;
8289 tree arg;
8290 gimple tgtcopy_stmt = NULL;
8291 tree sender
8292 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
8294 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
8296 gcc_assert (!gsi_end_p (gsi));
8297 stmt = gsi_stmt (gsi);
8298 if (gimple_code (stmt) != GIMPLE_ASSIGN)
8299 continue;
8301 if (gimple_num_ops (stmt) == 2)
8303 tree arg = gimple_assign_rhs1 (stmt);
8305 /* We're ignoring the subcode because we're
8306 effectively doing a STRIP_NOPS. */
8308 if (TREE_CODE (arg) == ADDR_EXPR
8309 && TREE_OPERAND (arg, 0) == sender)
8311 tgtcopy_stmt = stmt;
8312 break;
8317 gcc_assert (tgtcopy_stmt != NULL);
8318 arg = DECL_ARGUMENTS (child_fn);
8320 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
8321 gsi_remove (&gsi, true);
8324 /* Declare local variables needed in CHILD_CFUN. */
8325 block = DECL_INITIAL (child_fn);
8326 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
8327       /* The gimplifier could have recorded temporaries in the target block
8328 	 rather than in the containing function's local_decls chain,
8329 	 which would mean cgraph missed finalizing them.  Do it now.  */
8330 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
8331 if (TREE_CODE (t) == VAR_DECL
8332 && TREE_STATIC (t)
8333 && !DECL_EXTERNAL (t))
8334 varpool_node::finalize_decl (t);
8335 DECL_SAVED_TREE (child_fn) = NULL;
8336 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8337 gimple_set_body (child_fn, NULL);
8338 TREE_USED (block) = 1;
8340 /* Reset DECL_CONTEXT on function arguments. */
8341 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
8342 DECL_CONTEXT (t) = child_fn;
8344 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
8345 so that it can be moved to the child function. */
8346 gsi = gsi_last_bb (entry_bb);
8347 stmt = gsi_stmt (gsi);
8348 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
8349 && gimple_omp_target_kind (stmt)
8350 == GF_OMP_TARGET_KIND_REGION);
8351 gsi_remove (&gsi, true);
8352 e = split_block (entry_bb, stmt);
8353 entry_bb = e->dest;
8354 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8356 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8357 if (exit_bb)
8359 gsi = gsi_last_bb (exit_bb);
8360 gcc_assert (!gsi_end_p (gsi)
8361 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
8362 stmt = gimple_build_return (NULL);
8363 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8364 gsi_remove (&gsi, true);
8367 /* Move the target region into CHILD_CFUN. */
8369 block = gimple_block (entry_stmt);
8371 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8372 if (exit_bb)
8373 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8374       /* When the OMP expansion process cannot guarantee an up-to-date
8375 	 loop tree, arrange for the child function to fix up its loops.  */
8376 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8377 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8379 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8380 num = vec_safe_length (child_cfun->local_decls);
8381 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8383 t = (*child_cfun->local_decls)[srcidx];
8384 if (DECL_CONTEXT (t) == cfun->decl)
8385 continue;
8386 if (srcidx != dstidx)
8387 (*child_cfun->local_decls)[dstidx] = t;
8388 dstidx++;
8390 if (dstidx != num)
8391 vec_safe_truncate (child_cfun->local_decls, dstidx);
8393 /* Inform the callgraph about the new function. */
8394 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
8395 cgraph_node::add_new_function (child_fn, true);
8397 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8398 fixed in a following pass. */
8399 push_cfun (child_cfun);
8400 cgraph_edge::rebuild_edges ();
8402 /* Some EH regions might become dead, see PR34608. If
8403 pass_cleanup_cfg isn't the first pass to happen with the
8404 new child, these dead EH edges might cause problems.
8405 Clean them up now. */
8406 if (flag_exceptions)
8408 basic_block bb;
8409 bool changed = false;
8411 FOR_EACH_BB_FN (bb, cfun)
8412 changed |= gimple_purge_dead_eh_edges (bb);
8413 if (changed)
8414 cleanup_tree_cfg ();
8416 pop_cfun ();
8419 /* Emit a library call to launch the target region, or do data
8420 transfers. */
8421 tree t1, t2, t3, t4, device, cond, c, clauses;
8422 enum built_in_function start_ix;
8423 location_t clause_loc;
8425 clauses = gimple_omp_target_clauses (entry_stmt);
8427 if (kind == GF_OMP_TARGET_KIND_REGION)
8428 start_ix = BUILT_IN_GOMP_TARGET;
8429 else if (kind == GF_OMP_TARGET_KIND_DATA)
8430 start_ix = BUILT_IN_GOMP_TARGET_DATA;
8431 else
8432 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
8434 /* By default, the value of DEVICE is -1 (let runtime library choose)
8435 and there is no conditional. */
8436 cond = NULL_TREE;
8437 device = build_int_cst (integer_type_node, -1);
8439 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
8440 if (c)
8441 cond = OMP_CLAUSE_IF_EXPR (c);
8443 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
8444 if (c)
8446 device = OMP_CLAUSE_DEVICE_ID (c);
8447 clause_loc = OMP_CLAUSE_LOCATION (c);
8449 else
8450 clause_loc = gimple_location (entry_stmt);
8452 /* Ensure 'device' is of the correct type. */
8453 device = fold_convert_loc (clause_loc, integer_type_node, device);
8455 /* If we found the clause 'if (cond)', build
8456 (cond ? device : -2). */
8457 if (cond)
8459 cond = gimple_boolify (cond);
8461 basic_block cond_bb, then_bb, else_bb;
8462 edge e;
8463 tree tmp_var;
8465 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
8466 if (kind != GF_OMP_TARGET_KIND_REGION)
8468 gsi = gsi_last_bb (new_bb);
8469 gsi_prev (&gsi);
8470 e = split_block (new_bb, gsi_stmt (gsi));
8472 else
8473 e = split_block (new_bb, NULL);
8474 cond_bb = e->src;
8475 new_bb = e->dest;
8476 remove_edge (e);
8478 then_bb = create_empty_bb (cond_bb);
8479 else_bb = create_empty_bb (then_bb);
8480 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
8481 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
8483 stmt = gimple_build_cond_empty (cond);
8484 gsi = gsi_last_bb (cond_bb);
8485 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8487 gsi = gsi_start_bb (then_bb);
8488 stmt = gimple_build_assign (tmp_var, device);
8489 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8491 gsi = gsi_start_bb (else_bb);
8492 stmt = gimple_build_assign (tmp_var,
8493 build_int_cst (integer_type_node, -2));
8494 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
8496 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
8497 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
8498 add_bb_to_loop (then_bb, cond_bb->loop_father);
8499 add_bb_to_loop (else_bb, cond_bb->loop_father);
8500 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
8501 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
8503 device = tmp_var;
8506 gsi = gsi_last_bb (new_bb);
8507 t = gimple_omp_target_data_arg (entry_stmt);
8508 if (t == NULL)
8510 t1 = size_zero_node;
8511 t2 = build_zero_cst (ptr_type_node);
8512 t3 = t2;
8513 t4 = t2;
8515 else
8517 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8518 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8519 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8520 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8521 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8524 gimple g;
8525 /* FIXME: This will be address of
8526 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8527 symbol, as soon as the linker plugin is able to create it for us. */
8528 tree openmp_target = build_zero_cst (ptr_type_node);
8529 if (kind == GF_OMP_TARGET_KIND_REGION)
8531 tree fnaddr = build_fold_addr_expr (child_fn);
8532 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8533 device, fnaddr, openmp_target, t1, t2, t3, t4);
8535 else
8536 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8537 device, openmp_target, t1, t2, t3, t4);
8538 gimple_set_location (g, gimple_location (entry_stmt));
8539 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8540 if (kind != GF_OMP_TARGET_KIND_REGION)
8542 g = gsi_stmt (gsi);
8543 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8544 gsi_remove (&gsi, true);
8546 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8548 gsi = gsi_last_bb (region->exit);
8549 g = gsi_stmt (gsi);
8550 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8551 gsi_remove (&gsi, true);
8556 /* Expand the parallel region tree rooted at REGION. Expansion
8557 proceeds in depth-first order. Innermost regions are expanded
8558 first. This way, parallel regions that require a new function to
8559 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8560 internal dependencies in their body. */
8562 static void
8563 expand_omp (struct omp_region *region)
8565 while (region)
8567 location_t saved_location;
8568 gimple inner_stmt = NULL;
8570 /* First, determine whether this is a combined parallel+workshare
8571 region. */
8572 if (region->type == GIMPLE_OMP_PARALLEL)
8573 determine_parallel_type (region);
8575 if (region->type == GIMPLE_OMP_FOR
8576 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8577 inner_stmt = last_stmt (region->inner->entry);
8579 if (region->inner)
8580 expand_omp (region->inner);
8582 saved_location = input_location;
8583 if (gimple_has_location (last_stmt (region->entry)))
8584 input_location = gimple_location (last_stmt (region->entry));
8586 switch (region->type)
8588 case GIMPLE_OMP_PARALLEL:
8589 case GIMPLE_OMP_TASK:
8590 expand_omp_taskreg (region);
8591 break;
8593 case GIMPLE_OMP_FOR:
8594 expand_omp_for (region, inner_stmt);
8595 break;
8597 case GIMPLE_OMP_SECTIONS:
8598 expand_omp_sections (region);
8599 break;
8601 case GIMPLE_OMP_SECTION:
8602 /* Individual omp sections are handled together with their
8603 parent GIMPLE_OMP_SECTIONS region. */
8604 break;
8606 case GIMPLE_OMP_SINGLE:
8607 expand_omp_single (region);
8608 break;
8610 case GIMPLE_OMP_MASTER:
8611 case GIMPLE_OMP_TASKGROUP:
8612 case GIMPLE_OMP_ORDERED:
8613 case GIMPLE_OMP_CRITICAL:
8614 case GIMPLE_OMP_TEAMS:
8615 expand_omp_synch (region);
8616 break;
8618 case GIMPLE_OMP_ATOMIC_LOAD:
8619 expand_omp_atomic (region);
8620 break;
8622 case GIMPLE_OMP_TARGET:
8623 expand_omp_target (region);
8624 break;
8626 default:
8627 gcc_unreachable ();
8630 input_location = saved_location;
8631 region = region->next;
8636 /* Helper for build_omp_regions. Scan the dominator tree starting at
8637 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8638 true, the function ends once a single tree is built (otherwise, whole
8639 forest of OMP constructs may be built). */
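/* For example (a sketch),

	#pragma omp parallel
	#pragma omp for
	for (i = 0; i < n; i++)
	  ...

   produces a GIMPLE_OMP_PARALLEL region whose 'inner' child is the
   GIMPLE_OMP_FOR region; each GIMPLE_OMP_RETURN seen on the way down
   marks the exit of the innermost open region and pops PARENT back to
   the enclosing region.  */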
8641 static void
8642 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8643 bool single_tree)
8645 gimple_stmt_iterator gsi;
8646 gimple stmt;
8647 basic_block son;
8649 gsi = gsi_last_bb (bb);
8650 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8652 struct omp_region *region;
8653 enum gimple_code code;
8655 stmt = gsi_stmt (gsi);
8656 code = gimple_code (stmt);
8657 if (code == GIMPLE_OMP_RETURN)
8659 /* STMT is the return point out of region PARENT. Mark it
8660 as the exit point and make PARENT the immediately
8661 enclosing region. */
8662 gcc_assert (parent);
8663 region = parent;
8664 region->exit = bb;
8665 parent = parent->outer;
8667 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8669 	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8670 GIMPLE_OMP_RETURN, but matches with
8671 GIMPLE_OMP_ATOMIC_LOAD. */
8672 gcc_assert (parent);
8673 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8674 region = parent;
8675 region->exit = bb;
8676 parent = parent->outer;
8679 else if (code == GIMPLE_OMP_CONTINUE)
8681 gcc_assert (parent);
8682 parent->cont = bb;
8684 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8686 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8687 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8690 else if (code == GIMPLE_OMP_TARGET
8691 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8692 new_omp_region (bb, code, parent);
8693 else
8695 /* Otherwise, this directive becomes the parent for a new
8696 region. */
8697 region = new_omp_region (bb, code, parent);
8698 parent = region;
8702 if (single_tree && !parent)
8703 return;
8705 for (son = first_dom_son (CDI_DOMINATORS, bb);
8706 son;
8707 son = next_dom_son (CDI_DOMINATORS, son))
8708 build_omp_regions_1 (son, parent, single_tree);
8711 /* Builds the tree of OMP regions rooted at ROOT, storing it to
8712 root_omp_region. */
8714 static void
8715 build_omp_regions_root (basic_block root)
8717 gcc_assert (root_omp_region == NULL);
8718 build_omp_regions_1 (root, NULL, true);
8719 gcc_assert (root_omp_region != NULL);
8722 /* Expands omp construct (and its subconstructs) starting in HEAD. */
8724 void
8725 omp_expand_local (basic_block head)
8727 build_omp_regions_root (head);
8728 if (dump_file && (dump_flags & TDF_DETAILS))
8730 fprintf (dump_file, "\nOMP region tree\n\n");
8731 dump_omp_region (dump_file, root_omp_region, 0);
8732 fprintf (dump_file, "\n");
8735 remove_exit_barriers (root_omp_region);
8736 expand_omp (root_omp_region);
8738 free_omp_regions ();
8741 /* Scan the CFG and build a tree of OMP regions, storing the root of
8742    the tree in root_omp_region.  */
8744 static void
8745 build_omp_regions (void)
8747 gcc_assert (root_omp_region == NULL);
8748 calculate_dominance_info (CDI_DOMINATORS);
8749 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
8752 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8754 static unsigned int
8755 execute_expand_omp (void)
8757 build_omp_regions ();
8759 if (!root_omp_region)
8760 return 0;
8762 if (dump_file)
8764 fprintf (dump_file, "\nOMP region tree\n\n");
8765 dump_omp_region (dump_file, root_omp_region, 0);
8766 fprintf (dump_file, "\n");
8769 remove_exit_barriers (root_omp_region);
8771 expand_omp (root_omp_region);
8773 cleanup_tree_cfg ();
8775 free_omp_regions ();
8777 return 0;
8780 /* OMP expansion -- the default pass, run before creation of SSA form. */
8782 namespace {
8784 const pass_data pass_data_expand_omp =
8786 GIMPLE_PASS, /* type */
8787 "ompexp", /* name */
8788 OPTGROUP_NONE, /* optinfo_flags */
8789 TV_NONE, /* tv_id */
8790 PROP_gimple_any, /* properties_required */
8791 0, /* properties_provided */
8792 0, /* properties_destroyed */
8793 0, /* todo_flags_start */
8794 0, /* todo_flags_finish */
8797 class pass_expand_omp : public gimple_opt_pass
8799 public:
8800 pass_expand_omp (gcc::context *ctxt)
8801 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8804 /* opt_pass methods: */
8805 virtual bool gate (function *)
8807 return ((flag_openmp != 0 || flag_openmp_simd != 0
8808 || flag_cilkplus != 0) && !seen_error ());
8811 virtual unsigned int execute (function *) { return execute_expand_omp (); }
8813 }; // class pass_expand_omp
8815 } // anon namespace
8817 gimple_opt_pass *
8818 make_pass_expand_omp (gcc::context *ctxt)
8820 return new pass_expand_omp (ctxt);
8823 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8825 /* If ctx is a worksharing context inside of a cancellable parallel
8826 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
8827    and a conditional branch to the parallel's cancel_label to handle
8828 cancellation in the implicit barrier. */
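/* A sketch of the emitted tail, assuming a cancellable enclosing
   parallel: the GIMPLE_OMP_RETURN gains an artificial boolean LHS (so
   that expansion will use the bool-returning, cancellable barrier entry
   point), followed by

	if (lhs != 0) goto <cancel_label of the parallel>;
      fallthru_label:  */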
8830 static void
8831 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8833 gimple omp_return = gimple_seq_last_stmt (*body);
8834 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8835 if (gimple_omp_return_nowait_p (omp_return))
8836 return;
8837 if (ctx->outer
8838 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8839 && ctx->outer->cancellable)
8841 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
8842 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
8843 tree lhs = create_tmp_var (c_bool_type, NULL);
8844 gimple_omp_return_set_lhs (omp_return, lhs);
8845 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8846 gimple g = gimple_build_cond (NE_EXPR, lhs,
8847 fold_convert (c_bool_type,
8848 boolean_false_node),
8849 ctx->outer->cancel_label, fallthru_label);
8850 gimple_seq_add_stmt (body, g);
8851 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8855 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8856 CTX is the enclosing OMP context for the current statement. */
8858 static void
8859 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8861 tree block, control;
8862 gimple_stmt_iterator tgsi;
8863 gimple stmt, t;
8864 gimple_bind new_stmt, bind;
8865 gimple_seq ilist, dlist, olist, new_body;
8867 stmt = gsi_stmt (*gsi_p);
8869 push_gimplify_context ();
8871 dlist = NULL;
8872 ilist = NULL;
8873 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8874 &ilist, &dlist, ctx, NULL);
8876 new_body = gimple_omp_body (stmt);
8877 gimple_omp_set_body (stmt, NULL);
8878 tgsi = gsi_start (new_body);
8879 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8881 omp_context *sctx;
8882 gimple sec_start;
8884 sec_start = gsi_stmt (tgsi);
8885 sctx = maybe_lookup_ctx (sec_start);
8886 gcc_assert (sctx);
8888 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8889 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8890 GSI_CONTINUE_LINKING);
8891 gimple_omp_set_body (sec_start, NULL);
8893 if (gsi_one_before_end_p (tgsi))
8895 gimple_seq l = NULL;
8896 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8897 &l, ctx);
8898 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8899 gimple_omp_section_set_last (sec_start);
8902 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8903 GSI_CONTINUE_LINKING);
8906 block = make_node (BLOCK);
8907 bind = gimple_build_bind (NULL, new_body, block);
8909 olist = NULL;
8910 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8912 block = make_node (BLOCK);
8913 new_stmt = gimple_build_bind (NULL, NULL, block);
8914 gsi_replace (gsi_p, new_stmt, true);
8916 pop_gimplify_context (new_stmt);
8917 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8918 BLOCK_VARS (block) = gimple_bind_vars (bind);
8919 if (BLOCK_VARS (block))
8920 TREE_USED (block) = 1;
8922 new_body = NULL;
8923 gimple_seq_add_seq (&new_body, ilist);
8924 gimple_seq_add_stmt (&new_body, stmt);
8925 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8926 gimple_seq_add_stmt (&new_body, bind);
8928 control = create_tmp_var (unsigned_type_node, ".section");
8929 t = gimple_build_omp_continue (control, control);
8930 gimple_omp_sections_set_control (stmt, control);
8931 gimple_seq_add_stmt (&new_body, t);
8933 gimple_seq_add_seq (&new_body, olist);
8934 if (ctx->cancellable)
8935 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8936 gimple_seq_add_seq (&new_body, dlist);
8938 new_body = maybe_catch_exception (new_body);
8940 t = gimple_build_omp_return
8941 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8942 OMP_CLAUSE_NOWAIT));
8943 gimple_seq_add_stmt (&new_body, t);
8944 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8946 gimple_bind_set_body (new_stmt, new_body);
8950 /* A subroutine of lower_omp_single. Expand the simple form of
8951 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8953 if (GOMP_single_start ())
8954 BODY;
8955 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8957    FIXME.  It may be better to delay expanding the logic of this until
8958    pass_expand_omp.  The expanded logic may make the job of a
8959    synchronization analysis pass more difficult.  */
8961 static void
8962 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
8964 location_t loc = gimple_location (single_stmt);
8965 tree tlabel = create_artificial_label (loc);
8966 tree flabel = create_artificial_label (loc);
8967 gimple call, cond;
8968 tree lhs, decl;
8970 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8971 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8972 call = gimple_build_call (decl, 0);
8973 gimple_call_set_lhs (call, lhs);
8974 gimple_seq_add_stmt (pre_p, call);
8976 cond = gimple_build_cond (EQ_EXPR, lhs,
8977 fold_convert_loc (loc, TREE_TYPE (lhs),
8978 boolean_true_node),
8979 tlabel, flabel);
8980 gimple_seq_add_stmt (pre_p, cond);
8981 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
8982 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8983 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
8987 /* A subroutine of lower_omp_single.  Expand the form of a GIMPLE_OMP_SINGLE
8988    that has a copyprivate clause:
8990 #pragma omp single copyprivate (a, b, c)
8992 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
8995 	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
8996 	  {
8997 	    BODY;
8998 	    copyout.a = a;
8999 	    copyout.b = b;
9000 	    copyout.c = c;
9001 	    GOMP_single_copy_end (&copyout);
9002 	  }
9003 	else
9004 	  {
9005 	    a = copyout_p->a;
9006 	    b = copyout_p->b;
9007 	    c = copyout_p->c;
9008 	  }
9009 	GOMP_barrier ();
9012    FIXME.  It may be better to delay expanding the logic of this until
9013    pass_expand_omp.  The expanded logic may make the job of a
9014    synchronization analysis pass more difficult.  */
9016 static void
9017 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
9019 tree ptr_type, t, l0, l1, l2, bfn_decl;
9020 gimple_seq copyin_seq;
9021 location_t loc = gimple_location (single_stmt);
9023 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
9025 ptr_type = build_pointer_type (ctx->record_type);
9026 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
9028 l0 = create_artificial_label (loc);
9029 l1 = create_artificial_label (loc);
9030 l2 = create_artificial_label (loc);
9032 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
9033 t = build_call_expr_loc (loc, bfn_decl, 0);
9034 t = fold_convert_loc (loc, ptr_type, t);
9035 gimplify_assign (ctx->receiver_decl, t, pre_p);
9037 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
9038 build_int_cst (ptr_type, 0));
9039 t = build3 (COND_EXPR, void_type_node, t,
9040 build_and_jump (&l0), build_and_jump (&l1));
9041 gimplify_and_add (t, pre_p);
9043 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
9045 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
9047 copyin_seq = NULL;
9048 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
9049 &copyin_seq, ctx);
9051 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9052 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
9053 t = build_call_expr_loc (loc, bfn_decl, 1, t);
9054 gimplify_and_add (t, pre_p);
9056 t = build_and_jump (&l2);
9057 gimplify_and_add (t, pre_p);
9059 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
9061 gimple_seq_add_seq (pre_p, copyin_seq);
9063 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
9067 /* Lower code for an OpenMP single directive.  */
9069 static void
9070 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9072 tree block;
9073 gimple t, single_stmt = gsi_stmt (*gsi_p);
9074 gimple_bind bind;
9075 gimple_seq bind_body, bind_body_tail = NULL, dlist;
9077 push_gimplify_context ();
9079 block = make_node (BLOCK);
9080 bind = gimple_build_bind (NULL, NULL, block);
9081 gsi_replace (gsi_p, bind, true);
9082 bind_body = NULL;
9083 dlist = NULL;
9084 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
9085 &bind_body, &dlist, ctx, NULL);
9086 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
9088 gimple_seq_add_stmt (&bind_body, single_stmt);
9090 if (ctx->record_type)
9091 lower_omp_single_copy (single_stmt, &bind_body, ctx);
9092 else
9093 lower_omp_single_simple (single_stmt, &bind_body);
9095 gimple_omp_set_body (single_stmt, NULL);
9097 gimple_seq_add_seq (&bind_body, dlist);
9099 bind_body = maybe_catch_exception (bind_body);
9101 t = gimple_build_omp_return
9102 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
9103 OMP_CLAUSE_NOWAIT));
9104 gimple_seq_add_stmt (&bind_body_tail, t);
9105 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
9106 if (ctx->record_type)
9108 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
9109 tree clobber = build_constructor (ctx->record_type, NULL);
9110 TREE_THIS_VOLATILE (clobber) = 1;
9111 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
9112 clobber), GSI_SAME_STMT);
9114 gimple_seq_add_seq (&bind_body, bind_body_tail);
9115 gimple_bind_set_body (bind, bind_body);
9117 pop_gimplify_context (bind);
9119 gimple_bind_append_vars (bind, ctx->block_vars);
9120 BLOCK_VARS (block) = ctx->block_vars;
9121 if (BLOCK_VARS (block))
9122 TREE_USED (block) = 1;
9126 /* Lower code for an OpenMP master directive.  */
9128 static void
9129 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9131 tree block, lab = NULL, x, bfn_decl;
9132 gimple stmt = gsi_stmt (*gsi_p);
9133 gimple_bind bind;
9134 location_t loc = gimple_location (stmt);
9135 gimple_seq tseq;
9137 push_gimplify_context ();
9139 block = make_node (BLOCK);
9140 bind = gimple_build_bind (NULL, NULL, block);
9141 gsi_replace (gsi_p, bind, true);
9142 gimple_bind_add_stmt (bind, stmt);
9144 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9145 x = build_call_expr_loc (loc, bfn_decl, 0);
9146 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
9147 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
9148 tseq = NULL;
9149 gimplify_and_add (x, &tseq);
9150 gimple_bind_add_seq (bind, tseq);
9152 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9153 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9154 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9155 gimple_omp_set_body (stmt, NULL);
9157 gimple_bind_add_stmt (bind, gimple_build_label (lab));
9159 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9161 pop_gimplify_context (bind);
9163 gimple_bind_append_vars (bind, ctx->block_vars);
9164 BLOCK_VARS (block) = ctx->block_vars;
9168 /* Lower code for an OpenMP taskgroup directive.  */
9170 static void
9171 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9173 gimple stmt = gsi_stmt (*gsi_p), x;
9174 gimple_bind bind;
9175 tree block = make_node (BLOCK);
9177 bind = gimple_build_bind (NULL, NULL, block);
9178 gsi_replace (gsi_p, bind, true);
9179 gimple_bind_add_stmt (bind, stmt);
9181   x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
9182 			 0);
9183 gimple_bind_add_stmt (bind, x);
9185 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9186 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9187 gimple_omp_set_body (stmt, NULL);
9189 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9191 gimple_bind_append_vars (bind, ctx->block_vars);
9192 BLOCK_VARS (block) = ctx->block_vars;
9196 /* Lower code for an OpenMP ordered directive.  */
9198 static void
9199 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9201 tree block;
9202 gimple stmt = gsi_stmt (*gsi_p), x;
9203 gimple_bind bind;
9205 push_gimplify_context ();
9207 block = make_node (BLOCK);
9208 bind = gimple_build_bind (NULL, NULL, block);
9209 gsi_replace (gsi_p, bind, true);
9210 gimple_bind_add_stmt (bind, stmt);
9212   x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
9213 			 0);
9214 gimple_bind_add_stmt (bind, x);
9216 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9217 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9218 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9219 gimple_omp_set_body (stmt, NULL);
9221 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
9222 gimple_bind_add_stmt (bind, x);
9224 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9226 pop_gimplify_context (bind);
9228 gimple_bind_append_vars (bind, ctx->block_vars);
9229 BLOCK_VARS (block) = gimple_bind_vars (bind);
9233 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
9234    substitution of a couple of function calls.  The NAMED case, however,
9235    requires that languages coordinate a symbol name.  It is therefore
9236 best put here in common code. */
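/* For example (a sketch), '#pragma omp critical (io)' acquires a mutex
   through a lazily created pointer-sized symbol that is TREE_PUBLIC and
   DECL_COMMON, so every translation unit naming the same critical region
   shares one lock:

	void *.gomp_critical_user_io;	// common symbol

	GOMP_critical_name_start (&.gomp_critical_user_io);
	<body>;
	GOMP_critical_name_end (&.gomp_critical_user_io);

   The unnamed form simply calls GOMP_critical_start/GOMP_critical_end.  */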
9238 static GTY((param1_is (tree), param2_is (tree)))
9239 splay_tree critical_name_mutexes;
9241 static void
9242 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9244 tree block;
9245 tree name, lock, unlock;
9246 gimple stmt = gsi_stmt (*gsi_p);
9247 gimple_bind bind;
9248 location_t loc = gimple_location (stmt);
9249 gimple_seq tbody;
9251 name = gimple_omp_critical_name (stmt);
9252 if (name)
9254 tree decl;
9255 splay_tree_node n;
9257 if (!critical_name_mutexes)
9258 critical_name_mutexes
9259 = splay_tree_new_ggc (splay_tree_compare_pointers,
9260 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9261 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9263 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
9264 if (n == NULL)
9266 char *new_str;
9268 decl = create_tmp_var_raw (ptr_type_node, NULL);
9270 new_str = ACONCAT ((".gomp_critical_user_",
9271 IDENTIFIER_POINTER (name), NULL));
9272 DECL_NAME (decl) = get_identifier (new_str);
9273 TREE_PUBLIC (decl) = 1;
9274 TREE_STATIC (decl) = 1;
9275 DECL_COMMON (decl) = 1;
9276 DECL_ARTIFICIAL (decl) = 1;
9277 DECL_IGNORED_P (decl) = 1;
9278 varpool_node::finalize_decl (decl);
9280 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
9281 (splay_tree_value) decl);
9283 else
9284 decl = (tree) n->value;
9286 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
9287 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
9289 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
9290 unlock = build_call_expr_loc (loc, unlock, 1,
9291 build_fold_addr_expr_loc (loc, decl));
9293 else
9295 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
9296 lock = build_call_expr_loc (loc, lock, 0);
9298 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
9299 unlock = build_call_expr_loc (loc, unlock, 0);
9302 push_gimplify_context ();
9304 block = make_node (BLOCK);
9305 bind = gimple_build_bind (NULL, NULL, block);
9306 gsi_replace (gsi_p, bind, true);
9307 gimple_bind_add_stmt (bind, stmt);
9309 tbody = gimple_bind_body (bind);
9310 gimplify_and_add (lock, &tbody);
9311 gimple_bind_set_body (bind, tbody);
9313 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9314 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
9315 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
9316 gimple_omp_set_body (stmt, NULL);
9318 tbody = gimple_bind_body (bind);
9319 gimplify_and_add (unlock, &tbody);
9320 gimple_bind_set_body (bind, tbody);
9322 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
9324 pop_gimplify_context (bind);
9325 gimple_bind_append_vars (bind, ctx->block_vars);
9326 BLOCK_VARS (block) = gimple_bind_vars (bind);
9330 /* A subroutine of lower_omp_for. Generate code to emit the predicate
9331 for a lastprivate clause. Given a loop control predicate of (V
9332 cond N2), we gate the clause on (!(V cond N2)). The lowered form
9333 is appended to *DLIST, iterator initialization is appended to
9334 *BODY_P. */
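/* For example (a sketch; the private copy's name is hypothetical), given

        #pragma omp for lastprivate (x)
        for (V = N1; V < N2; V += STEP)

   the copy-back emitted by lower_lastprivate_clauses is gated as

        if (V >= N2)        i.e. !(V < N2)
          x = x.private;

   so only the thread that ran the final iteration writes back.  */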
9336 static void
9337 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
9338 gimple_seq *dlist, struct omp_context *ctx)
9340 tree clauses, cond, vinit;
9341 enum tree_code cond_code;
9342 gimple_seq stmts;
9344 cond_code = fd->loop.cond_code;
9345 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
9347 /* When possible, use a strict equality expression.  This can let
9348 VRP-style optimizations deduce the value and remove a copy. */
9349 if (tree_fits_shwi_p (fd->loop.step))
9351 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
9352 if (step == 1 || step == -1)
9353 cond_code = EQ_EXPR;
9356 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
9358 clauses = gimple_omp_for_clauses (fd->for_stmt);
9359 stmts = NULL;
9360 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
9361 if (!gimple_seq_empty_p (stmts))
9363 gimple_seq_add_seq (&stmts, *dlist);
9364 *dlist = stmts;
9366 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
9367 vinit = fd->loop.n1;
9368 if (cond_code == EQ_EXPR
9369 && tree_fits_shwi_p (fd->loop.n2)
9370 && ! integer_zerop (fd->loop.n2))
9371 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
9372 else
9373 vinit = unshare_expr (vinit);
9375 /* Initialize the iterator variable, so that threads that don't execute
9376 any iterations don't execute the lastprivate clauses by accident. */
9377 gimplify_assign (fd->loop.v, vinit, body_p);
9382 /* Lower code for an OpenMP loop directive. */
9384 static void
9385 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9387 tree *rhs_p, block;
9388 struct omp_for_data fd, *fdp = NULL;
9389 gimple stmt = gsi_stmt (*gsi_p);
9390 gimple_bind new_stmt;
9391 gimple_seq omp_for_body, body, dlist;
9392 size_t i;
9394 push_gimplify_context ();
9396 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
9398 block = make_node (BLOCK);
9399 new_stmt = gimple_build_bind (NULL, NULL, block);
9400 /* Replace at gsi right away, so that 'stmt' is no longer a member
9401 of a sequence: we're going to add it to a different
9402 one below. */
9403 gsi_replace (gsi_p, new_stmt, true);
9405 /* Move declaration of temporaries in the loop body before we make
9406 it go away. */
9407 omp_for_body = gimple_omp_body (stmt);
9408 if (!gimple_seq_empty_p (omp_for_body)
9409 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
9411 gimple_bind inner_bind =
9412 as_a <gimple_bind> (gimple_seq_first_stmt (omp_for_body));
9413 tree vars = gimple_bind_vars (inner_bind);
9414 gimple_bind_append_vars (new_stmt, vars);
9415 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
9416 keep them on the inner_bind and its block. */
9417 gimple_bind_set_vars (inner_bind, NULL_TREE);
9418 if (gimple_bind_block (inner_bind))
9419 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
9422 if (gimple_omp_for_combined_into_p (stmt))
9424 extract_omp_for_data (stmt, &fd, NULL);
9425 fdp = &fd;
9427 /* We need two temporaries with fd.loop.v type (istart/iend)
9428 and then (fd.collapse - 1) temporaries with the same
9429 type for count2 ... countN-1 vars if not constant. */
9430 size_t count = 2;
9431 tree type = fd.iter_type;
9432 if (fd.collapse > 1
9433 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
9434 count += fd.collapse - 1;
9435 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
9436 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
9437 tree clauses = *pc;
9438 if (parallel_for)
9439 outerc
9440 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
9441 OMP_CLAUSE__LOOPTEMP_);
9442 for (i = 0; i < count; i++)
9444 tree temp;
9445 if (parallel_for)
9447 gcc_assert (outerc);
9448 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
9449 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
9450 OMP_CLAUSE__LOOPTEMP_);
9452 else
9454 temp = create_tmp_var (type, NULL);
9455 insert_decl_map (&ctx->outer->cb, temp, temp);
9457 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
9458 OMP_CLAUSE_DECL (*pc) = temp;
9459 pc = &OMP_CLAUSE_CHAIN (*pc);
9461 *pc = clauses;
9464 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
9465 dlist = NULL;
9466 body = NULL;
9467 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
9468 fdp);
9469 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
9471 lower_omp (gimple_omp_body_ptr (stmt), ctx);
9473 /* Lower the header expressions. At this point, we can assume that
9474 the header is of the form:
9476 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
9478 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
9479 using the .omp_data_s mapping, if needed. */
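/* E.g. if VAL2 is a reference to a shared variable n, it is rewritten
   (a sketch; the temporary's name is hypothetical) as

        D.tmp = .omp_data_i->n;
        #pragma omp for (V = VAL1; V < D.tmp; V = V + VAL3)

   so each bound is a gimple value before the loop is expanded.  */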
9480 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
9482 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
9483 if (!is_gimple_min_invariant (*rhs_p))
9484 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9486 rhs_p = gimple_omp_for_final_ptr (stmt, i);
9487 if (!is_gimple_min_invariant (*rhs_p))
9488 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9490 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
9491 if (!is_gimple_min_invariant (*rhs_p))
9492 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
9495 /* Once lowered, extract the bounds and clauses. */
9496 extract_omp_for_data (stmt, &fd, NULL);
9498 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
9500 gimple_seq_add_stmt (&body, stmt);
9501 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
9503 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
9504 fd.loop.v));
9506 /* After the loop, add exit clauses. */
9507 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
9509 if (ctx->cancellable)
9510 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
9512 gimple_seq_add_seq (&body, dlist);
9514 body = maybe_catch_exception (body);
9516 /* Region exit marker goes at the end of the loop body. */
9517 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
9518 maybe_add_implicit_barrier_cancel (ctx, &body);
9519 pop_gimplify_context (new_stmt);
9521 gimple_bind_append_vars (new_stmt, ctx->block_vars);
9522 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
9523 if (BLOCK_VARS (block))
9524 TREE_USED (block) = 1;
9526 gimple_bind_set_body (new_stmt, body);
9527 gimple_omp_set_body (stmt, NULL);
9528 gimple_omp_for_set_pre_body (stmt, NULL);
9531 /* Callback for walk_stmts. Check if the current statement only contains
9532 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
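/* E.g. a parallel whose body is exactly one worksharing construct, as in

        #pragma omp parallel
          #pragma omp for
          for (...) ...

   leaves *INFO (wi->info) at 1, enabling the combined parallel+workshare
   expansion; any other statement in the body forces it to -1.  */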
9534 static tree
9535 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9536 bool *handled_ops_p,
9537 struct walk_stmt_info *wi)
9539 int *info = (int *) wi->info;
9540 gimple stmt = gsi_stmt (*gsi_p);
9542 *handled_ops_p = true;
9543 switch (gimple_code (stmt))
9545 WALK_SUBSTMTS;
9547 case GIMPLE_OMP_FOR:
9548 case GIMPLE_OMP_SECTIONS:
9549 *info = *info == 0 ? 1 : -1;
9550 break;
9551 default:
9552 *info = -1;
9553 break;
9555 return NULL;
9558 struct omp_taskcopy_context
9560 /* This field must be at the beginning, as we do "inheritance": Some
9561 callback functions for tree-inline.c (e.g., task_copyfn_copy_decl)
9562 receive a copy_body_data pointer that is cast back to an
9563 omp_taskcopy_context pointer. */
9564 copy_body_data cb;
9565 omp_context *ctx;
9568 static tree
9569 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9571 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9573 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9574 return create_tmp_var (TREE_TYPE (var), NULL);
9576 return var;
9579 static tree
9580 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9582 tree name, new_fields = NULL, type, f;
9584 type = lang_hooks.types.make_type (RECORD_TYPE);
9585 name = DECL_NAME (TYPE_NAME (orig_type));
9586 name = build_decl (gimple_location (tcctx->ctx->stmt),
9587 TYPE_DECL, name, type);
9588 TYPE_NAME (type) = name;
9590 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9592 tree new_f = copy_node (f);
9593 DECL_CONTEXT (new_f) = type;
9594 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9595 TREE_CHAIN (new_f) = new_fields;
9596 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9597 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9598 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9599 &tcctx->cb, NULL);
9600 new_fields = new_f;
9601 tcctx->cb.decl_map->put (f, new_f);
9603 TYPE_FIELDS (type) = nreverse (new_fields);
9604 layout_type (type);
9605 return type;
9608 /* Create task copyfn, the helper used to forward shared variable
pointers and copy-construct firstprivate variables into a task's
own data block. */
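/* A sketch of the helper built below (record and field names hypothetical):

        void task_copyfn (struct .omp_data_t *dst, struct .omp_data_s *src)
        {
          dst->shared_ptr = src->shared_ptr;    forward shared pointers
          dst->fp_var = src->fp_var;            copy-construct firstprivates
        }

   ARG/SARG below are the dst/src arguments, typed as pointers to
   record_type and srecord_type respectively.  */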
9610 static void
9611 create_task_copyfn (gimple task_stmt, omp_context *ctx)
9613 struct function *child_cfun;
9614 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9615 tree record_type, srecord_type, bind, list;
9616 bool record_needs_remap = false, srecord_needs_remap = false;
9617 splay_tree_node n;
9618 struct omp_taskcopy_context tcctx;
9619 location_t loc = gimple_location (task_stmt);
9621 child_fn = gimple_omp_task_copy_fn (task_stmt);
9622 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9623 gcc_assert (child_cfun->cfg == NULL);
9624 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9626 /* Reset DECL_CONTEXT on function arguments. */
9627 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9628 DECL_CONTEXT (t) = child_fn;
9630 /* Populate the function. */
9631 push_gimplify_context ();
9632 push_cfun (child_cfun);
9634 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9635 TREE_SIDE_EFFECTS (bind) = 1;
9636 list = NULL;
9637 DECL_SAVED_TREE (child_fn) = bind;
9638 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9640 /* Remap src and dst argument types if needed. */
9641 record_type = ctx->record_type;
9642 srecord_type = ctx->srecord_type;
9643 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9644 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9646 record_needs_remap = true;
9647 break;
9649 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9650 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9652 srecord_needs_remap = true;
9653 break;
9656 if (record_needs_remap || srecord_needs_remap)
9658 memset (&tcctx, '\0', sizeof (tcctx));
9659 tcctx.cb.src_fn = ctx->cb.src_fn;
9660 tcctx.cb.dst_fn = child_fn;
9661 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
9662 gcc_checking_assert (tcctx.cb.src_node);
9663 tcctx.cb.dst_node = tcctx.cb.src_node;
9664 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9665 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9666 tcctx.cb.eh_lp_nr = 0;
9667 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9668 tcctx.cb.decl_map = new hash_map<tree, tree>;
9669 tcctx.ctx = ctx;
9671 if (record_needs_remap)
9672 record_type = task_copyfn_remap_type (&tcctx, record_type);
9673 if (srecord_needs_remap)
9674 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9676 else
9677 tcctx.cb.decl_map = NULL;
9679 arg = DECL_ARGUMENTS (child_fn);
9680 TREE_TYPE (arg) = build_pointer_type (record_type);
9681 sarg = DECL_CHAIN (arg);
9682 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9684 /* First pass: initialize the temporaries used in the sizes and field
9685 offsets of record_type and srecord_type. */
9686 if (tcctx.cb.decl_map)
9687 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9688 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9690 tree *p;
9692 decl = OMP_CLAUSE_DECL (c);
9693 p = tcctx.cb.decl_map->get (decl);
9694 if (p == NULL)
9695 continue;
9696 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9697 sf = (tree) n->value;
9698 sf = *tcctx.cb.decl_map->get (sf);
9699 src = build_simple_mem_ref_loc (loc, sarg);
9700 src = omp_build_component_ref (src, sf);
9701 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9702 append_to_statement_list (t, &list);
9705 /* Second pass: copy shared variable pointers and copy-construct
9706 non-VLA firstprivate variables. */
9707 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9708 switch (OMP_CLAUSE_CODE (c))
9710 case OMP_CLAUSE_SHARED:
9711 decl = OMP_CLAUSE_DECL (c);
9712 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9713 if (n == NULL)
9714 break;
9715 f = (tree) n->value;
9716 if (tcctx.cb.decl_map)
9717 f = *tcctx.cb.decl_map->get (f);
9718 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9719 sf = (tree) n->value;
9720 if (tcctx.cb.decl_map)
9721 sf = *tcctx.cb.decl_map->get (sf);
9722 src = build_simple_mem_ref_loc (loc, sarg);
9723 src = omp_build_component_ref (src, sf);
9724 dst = build_simple_mem_ref_loc (loc, arg);
9725 dst = omp_build_component_ref (dst, f);
9726 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9727 append_to_statement_list (t, &list);
9728 break;
9729 case OMP_CLAUSE_FIRSTPRIVATE:
9730 decl = OMP_CLAUSE_DECL (c);
9731 if (is_variable_sized (decl))
9732 break;
9733 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9734 if (n == NULL)
9735 break;
9736 f = (tree) n->value;
9737 if (tcctx.cb.decl_map)
9738 f = *tcctx.cb.decl_map->get (f);
9739 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9740 if (n != NULL)
9742 sf = (tree) n->value;
9743 if (tcctx.cb.decl_map)
9744 sf = *tcctx.cb.decl_map->get (sf);
9745 src = build_simple_mem_ref_loc (loc, sarg);
9746 src = omp_build_component_ref (src, sf);
9747 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9748 src = build_simple_mem_ref_loc (loc, src);
9750 else
9751 src = decl;
9752 dst = build_simple_mem_ref_loc (loc, arg);
9753 dst = omp_build_component_ref (dst, f);
9754 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9755 append_to_statement_list (t, &list);
9756 break;
9757 case OMP_CLAUSE_PRIVATE:
9758 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9759 break;
9760 decl = OMP_CLAUSE_DECL (c);
9761 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9762 f = (tree) n->value;
9763 if (tcctx.cb.decl_map)
9764 f = *tcctx.cb.decl_map->get (f);
9765 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9766 if (n != NULL)
9768 sf = (tree) n->value;
9769 if (tcctx.cb.decl_map)
9770 sf = *tcctx.cb.decl_map->get (sf);
9771 src = build_simple_mem_ref_loc (loc, sarg);
9772 src = omp_build_component_ref (src, sf);
9773 if (use_pointer_for_field (decl, NULL))
9774 src = build_simple_mem_ref_loc (loc, src);
9776 else
9777 src = decl;
9778 dst = build_simple_mem_ref_loc (loc, arg);
9779 dst = omp_build_component_ref (dst, f);
9780 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9781 append_to_statement_list (t, &list);
9782 break;
9783 default:
9784 break;
9787 /* Last pass: handle VLA firstprivates. */
9788 if (tcctx.cb.decl_map)
9789 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9790 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9792 tree ind, ptr, df;
9794 decl = OMP_CLAUSE_DECL (c);
9795 if (!is_variable_sized (decl))
9796 continue;
9797 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9798 if (n == NULL)
9799 continue;
9800 f = (tree) n->value;
9801 f = *tcctx.cb.decl_map->get (f);
9802 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9803 ind = DECL_VALUE_EXPR (decl);
9804 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9805 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9806 n = splay_tree_lookup (ctx->sfield_map,
9807 (splay_tree_key) TREE_OPERAND (ind, 0));
9808 sf = (tree) n->value;
9809 sf = *tcctx.cb.decl_map->get (sf);
9810 src = build_simple_mem_ref_loc (loc, sarg);
9811 src = omp_build_component_ref (src, sf);
9812 src = build_simple_mem_ref_loc (loc, src);
9813 dst = build_simple_mem_ref_loc (loc, arg);
9814 dst = omp_build_component_ref (dst, f);
9815 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9816 append_to_statement_list (t, &list);
9817 n = splay_tree_lookup (ctx->field_map,
9818 (splay_tree_key) TREE_OPERAND (ind, 0));
9819 df = (tree) n->value;
9820 df = *tcctx.cb.decl_map->get (df);
9821 ptr = build_simple_mem_ref_loc (loc, arg);
9822 ptr = omp_build_component_ref (ptr, df);
9823 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9824 build_fold_addr_expr_loc (loc, dst));
9825 append_to_statement_list (t, &list);
9828 t = build1 (RETURN_EXPR, void_type_node, NULL);
9829 append_to_statement_list (t, &list);
9831 if (tcctx.cb.decl_map)
9832 delete tcctx.cb.decl_map;
9833 pop_gimplify_context (NULL);
9834 BIND_EXPR_BODY (bind) = list;
9835 pop_cfun ();
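/* Lower the depend clauses of task STMT.  A sketch of the array whose
   initialization is appended to *ISEQ below:

        array[0] = number of depend clauses;
        array[1] = number of out/inout clauses;
        array[2 ...] = addresses of the out/inout operands,
                       then addresses of the in operands;

   A new OMP_CLAUSE_DEPEND pointing at the array is prepended to the
   task's clauses, and *OSEQ receives the final clobber of the array.  */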
9838 static void
9839 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9841 tree c, clauses;
9842 gimple g;
9843 size_t n_in = 0, n_out = 0, idx = 2, i;
9845 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9846 OMP_CLAUSE_DEPEND);
9847 gcc_assert (clauses);
9848 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9849 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9850 switch (OMP_CLAUSE_DEPEND_KIND (c))
9852 case OMP_CLAUSE_DEPEND_IN:
9853 n_in++;
9854 break;
9855 case OMP_CLAUSE_DEPEND_OUT:
9856 case OMP_CLAUSE_DEPEND_INOUT:
9857 n_out++;
9858 break;
9859 default:
9860 gcc_unreachable ();
9862 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9863 tree array = create_tmp_var (type, NULL);
9864 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9865 NULL_TREE);
9866 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9867 gimple_seq_add_stmt (iseq, g);
9868 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9869 NULL_TREE);
9870 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9871 gimple_seq_add_stmt (iseq, g);
9872 for (i = 0; i < 2; i++)
9874 if ((i ? n_in : n_out) == 0)
9875 continue;
9876 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9877 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9878 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9880 tree t = OMP_CLAUSE_DECL (c);
9881 t = fold_convert (ptr_type_node, t);
9882 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9883 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9884 NULL_TREE, NULL_TREE);
9885 g = gimple_build_assign (r, t);
9886 gimple_seq_add_stmt (iseq, g);
9889 tree *p = gimple_omp_task_clauses_ptr (stmt);
9890 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9891 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9892 OMP_CLAUSE_CHAIN (c) = *p;
9893 *p = c;
9894 tree clobber = build_constructor (type, NULL);
9895 TREE_THIS_VOLATILE (clobber) = 1;
9896 g = gimple_build_assign (array, clobber);
9897 gimple_seq_add_stmt (oseq, g);
9900 /* Lower the OpenMP parallel or task directive in the current statement
9901 in GSI_P. CTX holds context information for the directive. */
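/* As a sketch, a parallel such as

        #pragma omp parallel shared (n)
          body;

   is lowered to roughly

        .omp_data_o.n = &n;                     send clauses / shared vars
        #pragma omp parallel [child fn]
          .omp_data_i = &.omp_data_o;           receiver setup
          body using .omp_data_i->n;
        .omp_data_o = {CLOBBER};

   The actual GOMP_parallel/GOMP_task call is generated later, during
   expansion.  */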
9903 static void
9904 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9906 tree clauses;
9907 tree child_fn, t;
9908 gimple stmt = gsi_stmt (*gsi_p);
9909 gimple_bind par_bind, bind, dep_bind = NULL;
9910 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9911 location_t loc = gimple_location (stmt);
9913 clauses = gimple_omp_taskreg_clauses (stmt);
9914 par_bind =
9915 as_a <gimple_bind> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
9916 par_body = gimple_bind_body (par_bind);
9917 child_fn = ctx->cb.dst_fn;
9918 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9919 && !gimple_omp_parallel_combined_p (stmt))
9921 struct walk_stmt_info wi;
9922 int ws_num = 0;
9924 memset (&wi, 0, sizeof (wi));
9925 wi.info = &ws_num;
9926 wi.val_only = true;
9927 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9928 if (ws_num == 1)
9929 gimple_omp_parallel_set_combined_p (stmt, true);
9931 gimple_seq dep_ilist = NULL;
9932 gimple_seq dep_olist = NULL;
9933 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9934 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9936 push_gimplify_context ();
9937 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9938 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9941 if (ctx->srecord_type)
9942 create_task_copyfn (stmt, ctx);
9944 push_gimplify_context ();
9946 par_olist = NULL;
9947 par_ilist = NULL;
9948 par_rlist = NULL;
9949 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9950 lower_omp (&par_body, ctx);
9951 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9952 lower_reduction_clauses (clauses, &par_rlist, ctx);
9954 /* Declare all the variables created by mapping and the variables
9955 declared in the scope of the parallel body. */
9956 record_vars_into (ctx->block_vars, child_fn);
9957 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9959 if (ctx->record_type)
9961 ctx->sender_decl
9962 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9963 : ctx->record_type, ".omp_data_o");
9964 DECL_NAMELESS (ctx->sender_decl) = 1;
9965 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9966 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9969 olist = NULL;
9970 ilist = NULL;
9971 lower_send_clauses (clauses, &ilist, &olist, ctx);
9972 lower_send_shared_vars (&ilist, &olist, ctx);
9974 if (ctx->record_type)
9976 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
9977 TREE_THIS_VOLATILE (clobber) = 1;
9978 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9979 clobber));
9982 /* Once all the expansions are done, sequence all the different
9983 fragments inside gimple_omp_body. */
9985 new_body = NULL;
9987 if (ctx->record_type)
9989 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9990 /* fixup_child_record_type might have changed receiver_decl's type. */
9991 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9992 gimple_seq_add_stmt (&new_body,
9993 gimple_build_assign (ctx->receiver_decl, t));
9996 gimple_seq_add_seq (&new_body, par_ilist);
9997 gimple_seq_add_seq (&new_body, par_body);
9998 gimple_seq_add_seq (&new_body, par_rlist);
9999 if (ctx->cancellable)
10000 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
10001 gimple_seq_add_seq (&new_body, par_olist);
10002 new_body = maybe_catch_exception (new_body);
10003 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10004 gimple_omp_set_body (stmt, new_body);
10006 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
10007 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
10008 gimple_bind_add_seq (bind, ilist);
10009 gimple_bind_add_stmt (bind, stmt);
10010 gimple_bind_add_seq (bind, olist);
10012 pop_gimplify_context (NULL);
10014 if (dep_bind)
10016 gimple_bind_add_seq (dep_bind, dep_ilist);
10017 gimple_bind_add_stmt (dep_bind, bind);
10018 gimple_bind_add_seq (dep_bind, dep_olist);
10019 pop_gimplify_context (dep_bind);
10023 /* Lower the OpenMP target directive in the current statement
10024 in GSI_P. CTX holds context information for the directive. */
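/* A sketch of the mapping setup built below for

        #pragma omp target map (tofrom: a)

        .omp_data_arr.a = &a;
        .omp_data_sizes[0] = sizeof (a);
        .omp_data_kinds[0] = OMP_CLAUSE_MAP_TOFROM | (ceil_log2 (align) << 3);
        #pragma omp target [receiver = &.omp_data_arr]

   The three objects are handed to the runtime through the statement's
   data_arg TREE_VEC of size 3.  */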
10026 static void
10027 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10029 tree clauses;
10030 tree child_fn, t, c;
10031 gimple stmt = gsi_stmt (*gsi_p);
10032 gimple_bind tgt_bind = NULL, bind;
10033 gimple_seq tgt_body = NULL, olist, ilist, new_body;
10034 location_t loc = gimple_location (stmt);
10035 int kind = gimple_omp_target_kind (stmt);
10036 unsigned int map_cnt = 0;
10038 clauses = gimple_omp_target_clauses (stmt);
10039 if (kind == GF_OMP_TARGET_KIND_REGION)
10041 tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
10042 tgt_body = gimple_bind_body (tgt_bind);
10044 else if (kind == GF_OMP_TARGET_KIND_DATA)
10045 tgt_body = gimple_omp_body (stmt);
10046 child_fn = ctx->cb.dst_fn;
10048 push_gimplify_context ();
10050 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10051 switch (OMP_CLAUSE_CODE (c))
10053 tree var, x;
10055 default:
10056 break;
10057 case OMP_CLAUSE_MAP:
10058 case OMP_CLAUSE_TO:
10059 case OMP_CLAUSE_FROM:
10060 var = OMP_CLAUSE_DECL (c);
10061 if (!DECL_P (var))
10063 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
10064 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10065 map_cnt++;
10066 continue;
10069 if (DECL_SIZE (var)
10070 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
10072 tree var2 = DECL_VALUE_EXPR (var);
10073 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
10074 var2 = TREE_OPERAND (var2, 0);
10075 gcc_assert (DECL_P (var2));
10076 var = var2;
10079 if (!maybe_lookup_field (var, ctx))
10080 continue;
10082 if (kind == GF_OMP_TARGET_KIND_REGION)
10084 x = build_receiver_ref (var, true, ctx);
10085 tree new_var = lookup_decl (var, ctx);
10086 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10087 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10088 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10089 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
10090 x = build_simple_mem_ref (x);
10091 SET_DECL_VALUE_EXPR (new_var, x);
10092 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
10094 map_cnt++;
10097 if (kind == GF_OMP_TARGET_KIND_REGION)
10099 target_nesting_level++;
10100 lower_omp (&tgt_body, ctx);
10101 target_nesting_level--;
10103 else if (kind == GF_OMP_TARGET_KIND_DATA)
10104 lower_omp (&tgt_body, ctx);
10106 if (kind == GF_OMP_TARGET_KIND_REGION)
10108 /* Declare all the variables created by mapping and the variables
10109 declared in the scope of the target body. */
10110 record_vars_into (ctx->block_vars, child_fn);
10111 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
10114 olist = NULL;
10115 ilist = NULL;
10116 if (ctx->record_type)
10118 ctx->sender_decl
10119 = create_tmp_var (ctx->record_type, ".omp_data_arr");
10120 DECL_NAMELESS (ctx->sender_decl) = 1;
10121 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
10122 t = make_tree_vec (3);
10123 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
10124 TREE_VEC_ELT (t, 1)
10125 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
10126 ".omp_data_sizes");
10127 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
10128 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
10129 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
10130 TREE_VEC_ELT (t, 2)
10131 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
10132 map_cnt),
10133 ".omp_data_kinds");
10134 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
10135 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
10136 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
10137 gimple_omp_target_set_data_arg (stmt, t);
10139 vec<constructor_elt, va_gc> *vsize;
10140 vec<constructor_elt, va_gc> *vkind;
10141 vec_alloc (vsize, map_cnt);
10142 vec_alloc (vkind, map_cnt);
10143 unsigned int map_idx = 0;
10145 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
10146 switch (OMP_CLAUSE_CODE (c))
10148 tree ovar, nc;
10150 default:
10151 break;
10152 case OMP_CLAUSE_MAP:
10153 case OMP_CLAUSE_TO:
10154 case OMP_CLAUSE_FROM:
10155 nc = c;
10156 ovar = OMP_CLAUSE_DECL (c);
10157 if (!DECL_P (ovar))
10159 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10160 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
10162 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
10163 == get_base_address (ovar));
10164 nc = OMP_CLAUSE_CHAIN (c);
10165 ovar = OMP_CLAUSE_DECL (nc);
10167 else
10169 tree x = build_sender_ref (ovar, ctx);
10170 tree v
10171 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
10172 gimplify_assign (x, v, &ilist);
10173 nc = NULL_TREE;
10176 else
10178 if (DECL_SIZE (ovar)
10179 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
10181 tree ovar2 = DECL_VALUE_EXPR (ovar);
10182 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
10183 ovar2 = TREE_OPERAND (ovar2, 0);
10184 gcc_assert (DECL_P (ovar2));
10185 ovar = ovar2;
10187 if (!maybe_lookup_field (ovar, ctx))
10188 continue;
10191 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
10192 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
10193 talign = DECL_ALIGN_UNIT (ovar);
10194 if (nc)
10196 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
10197 tree x = build_sender_ref (ovar, ctx);
10198 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
10199 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
10200 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
10201 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
10203 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10204 tree avar
10205 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
10206 mark_addressable (avar);
10207 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
10208 talign = DECL_ALIGN_UNIT (avar);
10209 avar = build_fold_addr_expr (avar);
10210 gimplify_assign (x, avar, &ilist);
10212 else if (is_gimple_reg (var))
10214 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
10215 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
10216 mark_addressable (avar);
10217 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
10218 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
10219 gimplify_assign (avar, var, &ilist);
10220 avar = build_fold_addr_expr (avar);
10221 gimplify_assign (x, avar, &ilist);
10222 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
10223 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
10224 && !TYPE_READONLY (TREE_TYPE (var)))
10226 x = build_sender_ref (ovar, ctx);
10227 x = build_simple_mem_ref (x);
10228 gimplify_assign (var, x, &olist);
10231 else
10233 var = build_fold_addr_expr (var);
10234 gimplify_assign (x, var, &ilist);
10237 tree s = OMP_CLAUSE_SIZE (c);
10238 if (s == NULL_TREE)
10239 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
10240 s = fold_convert (size_type_node, s);
10241 tree purpose = size_int (map_idx++);
10242 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
10243 if (TREE_CODE (s) != INTEGER_CST)
10244 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
10246 unsigned char tkind = 0;
10247 switch (OMP_CLAUSE_CODE (c))
10249 case OMP_CLAUSE_MAP:
10250 tkind = OMP_CLAUSE_MAP_KIND (c);
10251 break;
10252 case OMP_CLAUSE_TO:
10253 tkind = OMP_CLAUSE_MAP_TO;
10254 break;
10255 case OMP_CLAUSE_FROM:
10256 tkind = OMP_CLAUSE_MAP_FROM;
10257 break;
10258 default:
10259 gcc_unreachable ();
10261 talign = ceil_log2 (talign);
10262 tkind |= talign << 3;
10263 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
10264 build_int_cst (unsigned_char_type_node,
10265 tkind));
10266 if (nc && nc != c)
10267 c = nc;
10270 gcc_assert (map_idx == map_cnt);
10272 DECL_INITIAL (TREE_VEC_ELT (t, 1))
10273 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
10274 DECL_INITIAL (TREE_VEC_ELT (t, 2))
10275 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
10276 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
10278 gimple_seq initlist = NULL;
10279 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
10280 TREE_VEC_ELT (t, 1)),
10281 &initlist, true, NULL_TREE);
10282 gimple_seq_add_seq (&ilist, initlist);
10284 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
10285 NULL);
10286 TREE_THIS_VOLATILE (clobber) = 1;
10287 gimple_seq_add_stmt (&olist,
10288 gimple_build_assign (TREE_VEC_ELT (t, 1),
10289 clobber));
10292 tree clobber = build_constructor (ctx->record_type, NULL);
10293 TREE_THIS_VOLATILE (clobber) = 1;
10294 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
10295 clobber));
10298 /* Once all the expansions are done, sequence all the different
10299 fragments inside gimple_omp_body. */
10301 new_body = NULL;
10303 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
10305 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10306 /* fixup_child_record_type might have changed receiver_decl's type. */
10307 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
10308 gimple_seq_add_stmt (&new_body,
10309 gimple_build_assign (ctx->receiver_decl, t));
10312 if (kind == GF_OMP_TARGET_KIND_REGION)
10314 gimple_seq_add_seq (&new_body, tgt_body);
10315 new_body = maybe_catch_exception (new_body);
10317 else if (kind == GF_OMP_TARGET_KIND_DATA)
10318 new_body = tgt_body;
10319 if (kind != GF_OMP_TARGET_KIND_UPDATE)
10321 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
10322 gimple_omp_set_body (stmt, new_body);
10325 bind = gimple_build_bind (NULL, NULL,
10326 tgt_bind ? gimple_bind_block (tgt_bind)
10327 : NULL_TREE);
10328 gsi_replace (gsi_p, bind, true);
10329 gimple_bind_add_seq (bind, ilist);
10330 gimple_bind_add_stmt (bind, stmt);
10331 gimple_bind_add_seq (bind, olist);
10333 pop_gimplify_context (NULL);
10336 /* Lower code for an OpenMP teams directive. */
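/* A sketch of the lowered form:

        #pragma omp teams num_teams (N) thread_limit (M)
          body;

   becomes roughly

        num_teams = N;  thread_limit = M;       (0 when a clause is absent)
        GOMP_teams (num_teams, thread_limit);
        body;

   followed by the reduction and destructor sequences.  */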
10338 static void
10339 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10341 gimple teams_stmt = gsi_stmt (*gsi_p);
10342 push_gimplify_context ();
10344 tree block = make_node (BLOCK);
10345 gimple_bind bind = gimple_build_bind (NULL, NULL, block);
10346 gsi_replace (gsi_p, bind, true);
10347 gimple_seq bind_body = NULL;
10348 gimple_seq dlist = NULL;
10349 gimple_seq olist = NULL;
10351 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10352 OMP_CLAUSE_NUM_TEAMS);
10353 if (num_teams == NULL_TREE)
10354 num_teams = build_int_cst (unsigned_type_node, 0);
10355 else
10357 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
10358 num_teams = fold_convert (unsigned_type_node, num_teams);
10359 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
10361 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
10362 OMP_CLAUSE_THREAD_LIMIT);
10363 if (thread_limit == NULL_TREE)
10364 thread_limit = build_int_cst (unsigned_type_node, 0);
10365 else
10367 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
10368 thread_limit = fold_convert (unsigned_type_node, thread_limit);
10369 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
10370 fb_rvalue);
10373 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
10374 &bind_body, &dlist, ctx, NULL);
10375 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
10376 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
10377 gimple_seq_add_stmt (&bind_body, teams_stmt);
10379 location_t loc = gimple_location (teams_stmt);
10380 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
10381 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
10382 gimple_set_location (call, loc);
10383 gimple_seq_add_stmt (&bind_body, call);
10385 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
10386 gimple_omp_set_body (teams_stmt, NULL);
10387 gimple_seq_add_seq (&bind_body, olist);
10388 gimple_seq_add_seq (&bind_body, dlist);
10389 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
10390 gimple_bind_set_body (bind, bind_body);
10392 pop_gimplify_context (bind);
10394 gimple_bind_append_vars (bind, ctx->block_vars);
10395 BLOCK_VARS (block) = ctx->block_vars;
10396 if (BLOCK_VARS (block))
10397 TREE_USED (block) = 1;
10401 /* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
10402 regimplified.  If DATA is non-NULL, lower_omp_1 is being invoked
10403 outside of an OpenMP context, but with task_shared_vars set. */
10405 static tree
10406 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
10407 void *data)
10409 tree t = *tp;
10411 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
10412 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
10413 return t;
10415 if (task_shared_vars
10416 && DECL_P (t)
10417 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
10418 return t;
10420 /* If a global variable has been privatized, TREE_CONSTANT on
10421 ADDR_EXPR might be wrong. */
10422 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
10423 recompute_tree_invariant_for_addr_expr (t);
10425 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
10426 return NULL_TREE;
10429 static void
10430 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10432 gimple stmt = gsi_stmt (*gsi_p);
10433 struct walk_stmt_info wi;
10435 if (gimple_has_location (stmt))
10436 input_location = gimple_location (stmt);
10438 if (task_shared_vars)
10439 memset (&wi, '\0', sizeof (wi));
10441 /* If we have issued syntax errors, avoid doing any heavy lifting.
10442 Just replace the OpenMP directives with a NOP to avoid
10443 confusing RTL expansion. */
10444 if (seen_error () && is_gimple_omp (stmt))
10446 gsi_replace (gsi_p, gimple_build_nop (), true);
10447 return;
10450 switch (gimple_code (stmt))
10452 case GIMPLE_COND:
10453 if ((ctx || task_shared_vars)
10454 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
10455 ctx ? NULL : &wi, NULL)
10456 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
10457 ctx ? NULL : &wi, NULL)))
10458 gimple_regimplify_operands (stmt, gsi_p);
10459 break;
10460 case GIMPLE_CATCH:
10461 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
10462 break;
10463 case GIMPLE_EH_FILTER:
10464 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
10465 break;
10466 case GIMPLE_TRY:
10467 lower_omp (gimple_try_eval_ptr (stmt), ctx);
10468 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
10469 break;
10470 case GIMPLE_TRANSACTION:
10471 lower_omp (gimple_transaction_body_ptr (
10472 as_a <gimple_transaction> (stmt)),
10473 ctx);
10474 break;
10475 case GIMPLE_BIND:
10476 lower_omp (gimple_bind_body_ptr (as_a <gimple_bind> (stmt)), ctx);
10477 break;
10478 case GIMPLE_OMP_PARALLEL:
10479 case GIMPLE_OMP_TASK:
10480 ctx = maybe_lookup_ctx (stmt);
10481 gcc_assert (ctx);
10482 if (ctx->cancellable)
10483 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10484 lower_omp_taskreg (gsi_p, ctx);
10485 break;
10486 case GIMPLE_OMP_FOR:
10487 ctx = maybe_lookup_ctx (stmt);
10488 gcc_assert (ctx);
10489 if (ctx->cancellable)
10490 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10491 lower_omp_for (gsi_p, ctx);
10492 break;
10493 case GIMPLE_OMP_SECTIONS:
10494 ctx = maybe_lookup_ctx (stmt);
10495 gcc_assert (ctx);
10496 if (ctx->cancellable)
10497 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
10498 lower_omp_sections (gsi_p, ctx);
10499 break;
10500 case GIMPLE_OMP_SINGLE:
10501 ctx = maybe_lookup_ctx (stmt);
10502 gcc_assert (ctx);
10503 lower_omp_single (gsi_p, ctx);
10504 break;
10505 case GIMPLE_OMP_MASTER:
10506 ctx = maybe_lookup_ctx (stmt);
10507 gcc_assert (ctx);
10508 lower_omp_master (gsi_p, ctx);
10509 break;
10510 case GIMPLE_OMP_TASKGROUP:
10511 ctx = maybe_lookup_ctx (stmt);
10512 gcc_assert (ctx);
10513 lower_omp_taskgroup (gsi_p, ctx);
10514 break;
10515 case GIMPLE_OMP_ORDERED:
10516 ctx = maybe_lookup_ctx (stmt);
10517 gcc_assert (ctx);
10518 lower_omp_ordered (gsi_p, ctx);
10519 break;
10520 case GIMPLE_OMP_CRITICAL:
10521 ctx = maybe_lookup_ctx (stmt);
10522 gcc_assert (ctx);
10523 lower_omp_critical (gsi_p, ctx);
10524 break;
10525 case GIMPLE_OMP_ATOMIC_LOAD:
10526 if ((ctx || task_shared_vars)
10527 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
10528 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
10529 gimple_regimplify_operands (stmt, gsi_p);
10530 break;
10531 case GIMPLE_OMP_TARGET:
10532 ctx = maybe_lookup_ctx (stmt);
10533 gcc_assert (ctx);
10534 lower_omp_target (gsi_p, ctx);
10535 break;
10536 case GIMPLE_OMP_TEAMS:
10537 ctx = maybe_lookup_ctx (stmt);
10538 gcc_assert (ctx);
10539 lower_omp_teams (gsi_p, ctx);
10540 break;
10541 case GIMPLE_CALL:
10542 tree fndecl;
10543 fndecl = gimple_call_fndecl (stmt);
10544 if (fndecl
10545 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10546 switch (DECL_FUNCTION_CODE (fndecl))
10548 case BUILT_IN_GOMP_BARRIER:
10549 if (ctx == NULL)
10550 break;
10551 /* FALLTHRU */
10552 case BUILT_IN_GOMP_CANCEL:
10553 case BUILT_IN_GOMP_CANCELLATION_POINT:
10554 omp_context *cctx;
10555 cctx = ctx;
10556 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10557 cctx = cctx->outer;
10558 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
10559 if (!cctx->cancellable)
10561 if (DECL_FUNCTION_CODE (fndecl)
10562 == BUILT_IN_GOMP_CANCELLATION_POINT)
10564 stmt = gimple_build_nop ();
10565 gsi_replace (gsi_p, stmt, false);
10567 break;
10569 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10571 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10572 gimple_call_set_fndecl (stmt, fndecl);
10573 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10575 tree lhs;
10576 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)), NULL);
10577 gimple_call_set_lhs (stmt, lhs);
10578 tree fallthru_label;
10579 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10580 gimple g;
10581 g = gimple_build_label (fallthru_label);
10582 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10583 g = gimple_build_cond (NE_EXPR, lhs,
10584 fold_convert (TREE_TYPE (lhs),
10585 boolean_false_node),
10586 cctx->cancel_label, fallthru_label);
10587 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10588 break;
10589 default:
10590 break;
10592 /* FALLTHRU */
10593 default:
10594 if ((ctx || task_shared_vars)
10595 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10596 ctx ? NULL : &wi))
10598 /* Just remove clobbers.  This should happen only if we have
10599 "privatized" local addressable variables in SIMD regions;
10600 the clobber isn't needed in that case, and gimplifying the address
10601 of the ARRAY_REF into a pointer and creating a MEM_REF based
10602 clobber would create worse code than we get with the clobber
10603 dropped. */
10604 if (gimple_clobber_p (stmt))
10606 gsi_replace (gsi_p, gimple_build_nop (), true);
10607 break;
10609 gimple_regimplify_operands (stmt, gsi_p);
10611 break;
10615 static void
10616 lower_omp (gimple_seq *body, omp_context *ctx)
10618 location_t saved_location = input_location;
10619 gimple_stmt_iterator gsi;
10620 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10621 lower_omp_1 (&gsi, ctx);
10622 /* During gimplification, we have not always invoked fold_stmt
10623 (gimplify.c:maybe_fold_stmt); call it now. */
10624 if (target_nesting_level)
10625 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10626 fold_stmt (&gsi);
10627 input_location = saved_location;
10630 /* Main entry point. */
10632 static unsigned int
10633 execute_lower_omp (void)
10635 gimple_seq body;
10636 int i;
10637 omp_context *ctx;
10639 /* This pass always runs, to provide PROP_gimple_lomp.
10640 But there is nothing to do unless -fopenmp is given. */
10641 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_cilkplus == 0)
10642 return 0;
10644 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10645 delete_omp_context);
10647 body = gimple_body (current_function_decl);
10648 scan_omp (&body, NULL);
10649 gcc_assert (taskreg_nesting_level == 0);
10650 FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
10651 finish_taskreg_scan (ctx);
10652 taskreg_contexts.release ();
10654 if (all_contexts->root)
10656 if (task_shared_vars)
10657 push_gimplify_context ();
10658 lower_omp (&body, NULL);
10659 if (task_shared_vars)
10660 pop_gimplify_context (NULL);
10663 if (all_contexts)
10665 splay_tree_delete (all_contexts);
10666 all_contexts = NULL;
10668 BITMAP_FREE (task_shared_vars);
10669 return 0;
10672 namespace {
10674 const pass_data pass_data_lower_omp =
10676 GIMPLE_PASS, /* type */
10677 "omplower", /* name */
10678 OPTGROUP_NONE, /* optinfo_flags */
10679 TV_NONE, /* tv_id */
10680 PROP_gimple_any, /* properties_required */
10681 PROP_gimple_lomp, /* properties_provided */
10682 0, /* properties_destroyed */
10683 0, /* todo_flags_start */
10684 0, /* todo_flags_finish */
10687 class pass_lower_omp : public gimple_opt_pass
10689 public:
10690 pass_lower_omp (gcc::context *ctxt)
10691 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10694 /* opt_pass methods: */
10695 virtual unsigned int execute (function *) { return execute_lower_omp (); }
10697 }; // class pass_lower_omp
10699 } // anon namespace
10701 gimple_opt_pass *
10702 make_pass_lower_omp (gcc::context *ctxt)
10704 return new pass_lower_omp (ctxt);
10707 /* The following is a utility to diagnose OpenMP structured block violations.
10708 It is not part of the "omplower" pass, as that's invoked too late. It
10709 should be invoked by the respective front ends after gimplification. */
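/* E.g. both branches below are diagnosed, and the offending jump is
   then replaced by a nop:

        goto lab;                       invalid entry
        #pragma omp parallel
          {
            lab: ;
            goto out;                   invalid exit
          }
        out: ;
*/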
10711 static splay_tree all_labels;
10713 /* Check for mismatched contexts and generate an error if needed. Return
10714 true if an error is detected. */
10716 static bool
10717 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10718 gimple branch_ctx, gimple label_ctx)
10720 if (label_ctx == branch_ctx)
10721 return false;
10725 /* Previously we kept track of the label's entire context in diagnose_sb_[12]
10726 so we could traverse it and issue a correct "exit" or "enter" error
10727 message upon a structured block violation.
10729 We built the context by building a list with tree_cons'ing, but there is
10730 no easy counterpart in gimple tuples. It seems like far too much work
10731 for issuing exit/enter error messages. If someone really misses the
10732 distinct error message... patches welcome. */
10735 #if 0
10736 /* Try to avoid confusing the user by producing an error message
10737 with correct "exit" or "enter" verbiage. We prefer "exit"
10738 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10739 if (branch_ctx == NULL)
10740 exit_p = false;
10741 else
10743 while (label_ctx)
10745 if (TREE_VALUE (label_ctx) == branch_ctx)
10747 exit_p = false;
10748 break;
10750 label_ctx = TREE_CHAIN (label_ctx);
10754 if (exit_p)
10755 error ("invalid exit from OpenMP structured block");
10756 else
10757 error ("invalid entry to OpenMP structured block");
10758 #endif
10760 bool cilkplus_block = false;
10761 if (flag_cilkplus)
10763 if ((branch_ctx
10764 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10765 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10766 || (label_ctx
10767 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
10768 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10769 cilkplus_block = true;
10772 /* If it's obvious we have an invalid entry, be specific about the error. */
10773 if (branch_ctx == NULL)
10775 if (cilkplus_block)
10776 error ("invalid entry to Cilk Plus structured block");
10777 else
10778 error ("invalid entry to OpenMP structured block");
10780 else
10782 /* Otherwise, be vague and lazy, but efficient. */
10783 if (cilkplus_block)
10784 error ("invalid branch to/from a Cilk Plus structured block");
10785 else
10786 error ("invalid branch to/from an OpenMP structured block");
10789 gsi_replace (gsi_p, gimple_build_nop (), false);
10790 return true;
10793 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10794 where each label is found. */
10796 static tree
10797 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10798 struct walk_stmt_info *wi)
10800 gimple context = (gimple) wi->info;
10801 gimple inner_context;
10802 gimple stmt = gsi_stmt (*gsi_p);
10804 *handled_ops_p = true;
10806 switch (gimple_code (stmt))
10808 WALK_SUBSTMTS;
10810 case GIMPLE_OMP_PARALLEL:
10811 case GIMPLE_OMP_TASK:
10812 case GIMPLE_OMP_SECTIONS:
10813 case GIMPLE_OMP_SINGLE:
10814 case GIMPLE_OMP_SECTION:
10815 case GIMPLE_OMP_MASTER:
10816 case GIMPLE_OMP_ORDERED:
10817 case GIMPLE_OMP_CRITICAL:
10818 case GIMPLE_OMP_TARGET:
10819 case GIMPLE_OMP_TEAMS:
10820 case GIMPLE_OMP_TASKGROUP:
10821 /* The minimal context here is just the current OMP construct. */
10822 inner_context = stmt;
10823 wi->info = inner_context;
10824 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10825 wi->info = context;
10826 break;
10828 case GIMPLE_OMP_FOR:
10829 inner_context = stmt;
10830 wi->info = inner_context;
10831 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10832 walk them. */
10833 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10834 diagnose_sb_1, NULL, wi);
10835 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10836 wi->info = context;
10837 break;
10839 case GIMPLE_LABEL:
10840 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
10841 (splay_tree_value) context);
10842 break;
10844 default:
10845 break;
10848 return NULL_TREE;
10851 /* Pass 2: Check each branch and see if its context differs from that
10852 of its destination label. */
10854 static tree
10855 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10856 struct walk_stmt_info *wi)
10858 gimple context = (gimple) wi->info;
10859 splay_tree_node n;
10860 gimple stmt = gsi_stmt (*gsi_p);
10862 *handled_ops_p = true;
10864 switch (gimple_code (stmt))
10866 WALK_SUBSTMTS;
10868 case GIMPLE_OMP_PARALLEL:
10869 case GIMPLE_OMP_TASK:
10870 case GIMPLE_OMP_SECTIONS:
10871 case GIMPLE_OMP_SINGLE:
10872 case GIMPLE_OMP_SECTION:
10873 case GIMPLE_OMP_MASTER:
10874 case GIMPLE_OMP_ORDERED:
10875 case GIMPLE_OMP_CRITICAL:
10876 case GIMPLE_OMP_TARGET:
10877 case GIMPLE_OMP_TEAMS:
10878 case GIMPLE_OMP_TASKGROUP:
10879 wi->info = stmt;
10880 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10881 wi->info = context;
10882 break;
10884 case GIMPLE_OMP_FOR:
10885 wi->info = stmt;
10886 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10887 walk them. */
10888 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10889 diagnose_sb_2, NULL, wi);
10890 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10891 wi->info = context;
10892 break;
10894 case GIMPLE_COND:
10896 tree lab = gimple_cond_true_label (stmt);
10897 if (lab)
10899 n = splay_tree_lookup (all_labels,
10900 (splay_tree_key) lab);
10901 diagnose_sb_0 (gsi_p, context,
10902 n ? (gimple) n->value : NULL);
10904 lab = gimple_cond_false_label (stmt);
10905 if (lab)
10907 n = splay_tree_lookup (all_labels,
10908 (splay_tree_key) lab);
10909 diagnose_sb_0 (gsi_p, context,
10910 n ? (gimple) n->value : NULL);
10913 break;
10915 case GIMPLE_GOTO:
10917 tree lab = gimple_goto_dest (stmt);
10918 if (TREE_CODE (lab) != LABEL_DECL)
10919 break;
10921 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10922 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10924 break;
10926 case GIMPLE_SWITCH:
10928 gimple_switch switch_stmt = as_a <gimple_switch> (stmt);
10929 unsigned int i;
10930 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
10932 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
10933 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10934 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10935 break;
10938 break;
10940 case GIMPLE_RETURN:
10941 diagnose_sb_0 (gsi_p, context, NULL);
10942 break;
10944 default:
10945 break;
10948 return NULL_TREE;
10951 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10952 codes. */
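/* For a GIMPLE_OMP_FOR region, a sketch of the edges created below:

        entry (GIMPLE_OMP_FOR) -> body ... -> cont (GIMPLE_OMP_CONTINUE)
        cont -> body                    loopback edge
        entry -> exit                   zero-trip-count case
        cont -> exit

   These edges are marked EDGE_ABNORMAL so later passes do not split
   them.  */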
10953 bool
10954 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
10955 int *region_idx)
10957 gimple last = last_stmt (bb);
10958 enum gimple_code code = gimple_code (last);
10959 struct omp_region *cur_region = *region;
10960 bool fallthru = false;
10962 switch (code)
10964 case GIMPLE_OMP_PARALLEL:
10965 case GIMPLE_OMP_TASK:
10966 case GIMPLE_OMP_FOR:
10967 case GIMPLE_OMP_SINGLE:
10968 case GIMPLE_OMP_TEAMS:
10969 case GIMPLE_OMP_MASTER:
10970 case GIMPLE_OMP_TASKGROUP:
10971 case GIMPLE_OMP_ORDERED:
10972 case GIMPLE_OMP_CRITICAL:
10973 case GIMPLE_OMP_SECTION:
10974 cur_region = new_omp_region (bb, code, cur_region);
10975 fallthru = true;
10976 break;
10978 case GIMPLE_OMP_TARGET:
10979 cur_region = new_omp_region (bb, code, cur_region);
10980 fallthru = true;
10981 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
10982 cur_region = cur_region->outer;
10983 break;
10985 case GIMPLE_OMP_SECTIONS:
10986 cur_region = new_omp_region (bb, code, cur_region);
10987 fallthru = true;
10988 break;
10990 case GIMPLE_OMP_SECTIONS_SWITCH:
10991 fallthru = false;
10992 break;
10994 case GIMPLE_OMP_ATOMIC_LOAD:
10995 case GIMPLE_OMP_ATOMIC_STORE:
10996 fallthru = true;
10997 break;
10999 case GIMPLE_OMP_RETURN:
11000 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
11001 somewhere other than the next block. This will be
11002 created later. */
11003 cur_region->exit = bb;
11004 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
11005 cur_region = cur_region->outer;
11006 break;
11008 case GIMPLE_OMP_CONTINUE:
11009 cur_region->cont = bb;
11010 switch (cur_region->type)
11012 case GIMPLE_OMP_FOR:
11013 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
11014 successor edges as abnormal to prevent splitting
11015 them. */
11016 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
11017 /* Make the loopback edge. */
11018 make_edge (bb, single_succ (cur_region->entry),
11019 EDGE_ABNORMAL);
11021 /* Create an edge from GIMPLE_OMP_FOR to exit, which
11022 corresponds to the case that the body of the loop
11023 is not executed at all. */
11024 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
11025 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
11026 fallthru = false;
11027 break;
11029 case GIMPLE_OMP_SECTIONS:
11030 /* Wire up the edges into and out of the nested sections. */
11032 basic_block switch_bb = single_succ (cur_region->entry);
11034 struct omp_region *i;
11035 for (i = cur_region->inner; i ; i = i->next)
11037 gcc_assert (i->type == GIMPLE_OMP_SECTION);
11038 make_edge (switch_bb, i->entry, 0);
11039 make_edge (i->exit, bb, EDGE_FALLTHRU);
11042 /* Make the loopback edge to the block with
11043 GIMPLE_OMP_SECTIONS_SWITCH. */
11044 make_edge (bb, switch_bb, 0);
11046 /* Make the edge from the switch to exit. */
11047 make_edge (switch_bb, bb->next_bb, 0);
11048 fallthru = false;
11050 break;
11052 default:
11053 gcc_unreachable ();
11055 break;
11057 default:
11058 gcc_unreachable ();
11061 if (*region != cur_region)
11063 *region = cur_region;
11064 if (cur_region)
11065 *region_idx = cur_region->entry->index;
11066 else
11067 *region_idx = 0;
11070 return fallthru;
11073 static unsigned int
11074 diagnose_omp_structured_block_errors (void)
11076 struct walk_stmt_info wi;
11077 gimple_seq body = gimple_body (current_function_decl);
11079 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
11081 memset (&wi, 0, sizeof (wi));
11082 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
11084 memset (&wi, 0, sizeof (wi));
11085 wi.want_locations = true;
11086 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
11088 gimple_set_body (current_function_decl, body);
11090 splay_tree_delete (all_labels);
11091 all_labels = NULL;
11093 return 0;
11096 namespace {
11098 const pass_data pass_data_diagnose_omp_blocks =
11100 GIMPLE_PASS, /* type */
11101 "*diagnose_omp_blocks", /* name */
11102 OPTGROUP_NONE, /* optinfo_flags */
11103 TV_NONE, /* tv_id */
11104 PROP_gimple_any, /* properties_required */
11105 0, /* properties_provided */
11106 0, /* properties_destroyed */
11107 0, /* todo_flags_start */
11108 0, /* todo_flags_finish */
11111 class pass_diagnose_omp_blocks : public gimple_opt_pass
11113 public:
11114 pass_diagnose_omp_blocks (gcc::context *ctxt)
11115 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
11118 /* opt_pass methods: */
11119 virtual bool gate (function *) { return flag_openmp || flag_cilkplus; }
11120 virtual unsigned int execute (function *)
11122 return diagnose_omp_structured_block_errors ();
11125 }; // class pass_diagnose_omp_blocks
11127 } // anon namespace
11129 gimple_opt_pass *
11130 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
11132 return new pass_diagnose_omp_blocks (ctxt);
11135 /* SIMD clone supporting code. */
11137 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
11138 of arguments to reserve space for. */
11140 static struct cgraph_simd_clone *
11141 simd_clone_struct_alloc (int nargs)
11143 struct cgraph_simd_clone *clone_info;
11144 size_t len = (sizeof (struct cgraph_simd_clone)
11145 + nargs * sizeof (struct cgraph_simd_clone_arg));
11146 clone_info = (struct cgraph_simd_clone *)
11147 ggc_internal_cleared_alloc (len);
11148 return clone_info;
11151 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
11153 static inline void
11154 simd_clone_struct_copy (struct cgraph_simd_clone *to,
11155 struct cgraph_simd_clone *from)
11157 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
11158 + ((from->nargs - from->inbranch)
11159 * sizeof (struct cgraph_simd_clone_arg))));
11162 /* Return a vector of the parameter types of function FNDECL.  This uses
11163 TYPE_ARG_TYPES if available, otherwise falls back to the types of
11164 the DECL_ARGUMENTS chain. */
11166 vec<tree>
11167 simd_clone_vector_of_formal_parm_types (tree fndecl)
11169 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
11170 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
11171 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
11172 unsigned int i;
11173 tree arg;
11174 FOR_EACH_VEC_ELT (args, i, arg)
11175 args[i] = TREE_TYPE (args[i]);
11176 return args;
11179 /* Given a simd function in NODE, extract the simd specific
11180 information from the OMP clauses passed in CLAUSES, and return
11181 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
11182 is set to TRUE if the `inbranch' or `notinbranch' clause specified,
11183 otherwise set to FALSE. */
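/* For instance (illustrative only), a declaration such as

     #pragma omp declare simd uniform(n) linear(i:1) aligned(p:32) notinbranch
     int foo (int n, int i, int *p);

   records args[0] as SIMD_CLONE_ARG_TYPE_UNIFORM, args[1] as
   SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with linear_step 1, sets
   args[2].alignment to 32, clears inbranch and sets
   *INBRANCH_SPECIFIED.  */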
11185 static struct cgraph_simd_clone *
11186 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
11187 bool *inbranch_specified)
11189 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
11190 tree t;
11191 int n;
11192 *inbranch_specified = false;
11194 n = args.length ();
11195 if (n > 0 && args.last () == void_type_node)
11196 n--;
11198 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
11199 be cloned carry a distinctive artificial attribute in addition to
11200 "omp declare simd". */
11201 bool cilk_clone
11202 = (flag_cilkplus
11203 && lookup_attribute ("cilk simd function",
11204 DECL_ATTRIBUTES (node->decl)));
11206 /* Allocate one more than needed just in case this is an in-branch
11207 clone which will require a mask argument. */
11208 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
11209 clone_info->nargs = n;
11210 clone_info->cilk_elemental = cilk_clone;
11212 if (!clauses)
11214 args.release ();
11215 return clone_info;
11217 clauses = TREE_VALUE (clauses);
11218 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
11219 return clone_info;
11221 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
11223 switch (OMP_CLAUSE_CODE (t))
11225 case OMP_CLAUSE_INBRANCH:
11226 clone_info->inbranch = 1;
11227 *inbranch_specified = true;
11228 break;
11229 case OMP_CLAUSE_NOTINBRANCH:
11230 clone_info->inbranch = 0;
11231 *inbranch_specified = true;
11232 break;
11233 case OMP_CLAUSE_SIMDLEN:
11234 clone_info->simdlen
11235 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
11236 break;
11237 case OMP_CLAUSE_LINEAR:
11239 tree decl = OMP_CLAUSE_DECL (t);
11240 tree step = OMP_CLAUSE_LINEAR_STEP (t);
11241 int argno = TREE_INT_CST_LOW (decl);
11242 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
11244 clone_info->args[argno].arg_type
11245 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
11246 clone_info->args[argno].linear_step = tree_to_shwi (step);
11247 gcc_assert (clone_info->args[argno].linear_step >= 0
11248 && clone_info->args[argno].linear_step < n);
11250 else
11252 if (POINTER_TYPE_P (args[argno]))
11253 step = fold_convert (ssizetype, step);
11254 if (!tree_fits_shwi_p (step))
11256 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11257 "ignoring large linear step");
11258 args.release ();
11259 return NULL;
11261 else if (integer_zerop (step))
11263 warning_at (OMP_CLAUSE_LOCATION (t), 0,
11264 "ignoring zero linear step");
11265 args.release ();
11266 return NULL;
11268 else
11270 clone_info->args[argno].arg_type
11271 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
11272 clone_info->args[argno].linear_step = tree_to_shwi (step);
11275 break;
11277 case OMP_CLAUSE_UNIFORM:
11279 tree decl = OMP_CLAUSE_DECL (t);
11280 int argno = tree_to_uhwi (decl);
11281 clone_info->args[argno].arg_type
11282 = SIMD_CLONE_ARG_TYPE_UNIFORM;
11283 break;
11285 case OMP_CLAUSE_ALIGNED:
11287 tree decl = OMP_CLAUSE_DECL (t);
11288 int argno = tree_to_uhwi (decl);
11289 clone_info->args[argno].alignment
11290 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
11291 break;
11293 default:
11294 break;
11297 args.release ();
11298 return clone_info;
11301 /* Given a SIMD clone in NODE, calculate the characteristic data
11302 type and return the corresponding type. The characteristic data
11303 type is computed as described in the Intel Vector ABI. */
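/* Worked example (illustrative): for

     #pragma omp declare simd uniform(n)
     double foo (double x, int n);

   rule a) below applies and the characteristic data type is double.
   For a void function it would instead be the type of the first
   non-uniform, non-linear parameter per rule b), or int per d).  */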
11305 static tree
11306 simd_clone_compute_base_data_type (struct cgraph_node *node,
11307 struct cgraph_simd_clone *clone_info)
11309 tree type = integer_type_node;
11310 tree fndecl = node->decl;
11312 /* a) For non-void function, the characteristic data type is the
11313 return type. */
11314 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
11315 type = TREE_TYPE (TREE_TYPE (fndecl));
11317 /* b) If the function has any non-uniform, non-linear parameters,
11318 then the characteristic data type is the type of the first
11319 such parameter. */
11320 else
11322 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
11323 for (unsigned int i = 0; i < clone_info->nargs; ++i)
11324 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
11326 type = map[i];
11327 break;
11329 map.release ();
11332 /* c) If the characteristic data type determined by a) or b) above
11333 is struct, union, or class type which is pass-by-value (except
11334 for the type that maps to the built-in complex data type), the
11335 characteristic data type is int. */
11336 if (RECORD_OR_UNION_TYPE_P (type)
11337 && !aggregate_value_p (type, NULL)
11338 && TREE_CODE (type) != COMPLEX_TYPE)
11339 return integer_type_node;
11341 /* d) If none of the above three classes is applicable, the
11342 characteristic data type is int. */
11344 return type;
11346 /* e) For Intel Xeon Phi native and offload compilation, if the
11347 resulting characteristic data type is 8-bit or 16-bit integer
11348 data type, the characteristic data type is int. */
11349 /* Well, we don't handle Xeon Phi yet. */
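/* Compute the mangled name for the simd clone described by CLONE_INFO
   of NODE per the Intel Vector ABI, or return NULL_TREE if an
   identically mangled clone already exists.  The name has the shape

     _ZGV <isa> <mask> <simdlen> <parameter letters> _ <original name>

   so e.g. a hypothetical x86 clone of "int foo (int, int)" with
   simdlen 4, no mask and two plain vector arguments would be mangled
   "_ZGVbN4vv_foo".  */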
11352 static tree
11353 simd_clone_mangle (struct cgraph_node *node,
11354 struct cgraph_simd_clone *clone_info)
11356 char vecsize_mangle = clone_info->vecsize_mangle;
11357 char mask = clone_info->inbranch ? 'M' : 'N';
11358 unsigned int simdlen = clone_info->simdlen;
11359 unsigned int n;
11360 pretty_printer pp;
11362 gcc_assert (vecsize_mangle && simdlen);
11364 pp_string (&pp, "_ZGV");
11365 pp_character (&pp, vecsize_mangle);
11366 pp_character (&pp, mask);
11367 pp_decimal_int (&pp, simdlen);
11369 for (n = 0; n < clone_info->nargs; ++n)
11371 struct cgraph_simd_clone_arg arg = clone_info->args[n];
11373 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
11374 pp_character (&pp, 'u');
11375 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
11377 gcc_assert (arg.linear_step != 0);
11378 pp_character (&pp, 'l');
11379 if (arg.linear_step > 1)
11380 pp_unsigned_wide_integer (&pp, arg.linear_step);
11381 else if (arg.linear_step < 0)
11383 pp_character (&pp, 'n');
11384 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
11385 arg.linear_step));
11388 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
11390 pp_character (&pp, 's');
11391 pp_unsigned_wide_integer (&pp, arg.linear_step);
11393 else
11394 pp_character (&pp, 'v');
11395 if (arg.alignment)
11397 pp_character (&pp, 'a');
11398 pp_decimal_int (&pp, arg.alignment);
11402 pp_underscore (&pp);
11403 pp_string (&pp,
11404 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl)));
11405 const char *str = pp_formatted_text (&pp);
11407 /* If there already is a SIMD clone with the same mangled name, don't
11408 add another one. This can happen e.g. for
11409 #pragma omp declare simd
11410 #pragma omp declare simd simdlen(8)
11411 int foo (int, int);
11412 if the simdlen is assumed to be 8 for the first one, etc. */
11413 for (struct cgraph_node *clone = node->simd_clones; clone;
11414 clone = clone->simdclone->next_clone)
11415 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
11416 str) == 0)
11417 return NULL_TREE;
11419 return get_identifier (str);
11422 /* Create a simd clone of OLD_NODE and return it. */
11424 static struct cgraph_node *
11425 simd_clone_create (struct cgraph_node *old_node)
11427 struct cgraph_node *new_node;
11428 if (old_node->definition)
11430 if (!old_node->has_gimple_body_p ())
11431 return NULL;
11432 old_node->get_body ();
11433 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
11434 false, NULL, NULL,
11435 "simdclone");
11437 else
11439 tree old_decl = old_node->decl;
11440 tree new_decl = copy_node (old_node->decl);
11441 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
11442 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
11443 SET_DECL_RTL (new_decl, NULL);
11444 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
11445 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
11446 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
11447 symtab->call_cgraph_insertion_hooks (new_node);
11449 if (new_node == NULL)
11450 return new_node;
11452 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
11454 /* The function cgraph_function_versioning () will force the new
11455 symbol local. Undo this, and inherit external visibility from
11456 the old node. */
11457 new_node->local.local = old_node->local.local;
11458 new_node->externally_visible = old_node->externally_visible;
11460 return new_node;
11463 /* Adjust the return type of the given function to its appropriate
11464 vector counterpart. Returns a simd array to be used throughout the
11465 function as a return value. */
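/* Worked example (illustrative, x86-style sizes): with simdlen 8, an
   int return type and a vecsize_int of 256 bits, veclen is 256/32 == 8
   == simdlen, so the return type becomes a single V8SI vector.  With a
   vecsize_int of 128 bits, veclen is 4 and the return type becomes an
   array of two V4SI vectors, accessed through the "retval" array.  */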
11467 static tree
11468 simd_clone_adjust_return_type (struct cgraph_node *node)
11470 tree fndecl = node->decl;
11471 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
11472 unsigned int veclen;
11473 tree t;
11475 /* Adjust the function return type. */
11476 if (orig_rettype == void_type_node)
11477 return NULL_TREE;
11478 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
11479 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl)))
11480 || POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (fndecl))))
11481 veclen = node->simdclone->vecsize_int;
11482 else
11483 veclen = node->simdclone->vecsize_float;
11484 veclen /= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (fndecl))));
11485 if (veclen > node->simdclone->simdlen)
11486 veclen = node->simdclone->simdlen;
11487 if (veclen == node->simdclone->simdlen)
11488 TREE_TYPE (TREE_TYPE (fndecl))
11489 = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)),
11490 node->simdclone->simdlen);
11491 else
11493 t = build_vector_type (TREE_TYPE (TREE_TYPE (fndecl)), veclen);
11494 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
11495 TREE_TYPE (TREE_TYPE (fndecl)) = t;
11497 if (!node->definition)
11498 return NULL_TREE;
11500 t = DECL_RESULT (fndecl);
11501 /* Adjust the DECL_RESULT. */
11502 gcc_assert (TREE_TYPE (t) != void_type_node);
11503 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
11504 relayout_decl (t);
11506 tree atype = build_array_type_nelts (orig_rettype,
11507 node->simdclone->simdlen);
11508 if (veclen != node->simdclone->simdlen)
11509 return build1 (VIEW_CONVERT_EXPR, atype, t);
11511 /* Set up a SIMD array to use as the return value. */
11512 tree retval = create_tmp_var_raw (atype, "retval");
11513 gimple_add_tmp_var (retval);
11514 return retval;
11517 /* Each vector argument has a corresponding array to be used locally
11518 as part of the eventual loop. Create such temporary array and
11519 return it.
11521 PREFIX is the prefix to be used for the temporary.
11523 TYPE is the inner element type.
11525 SIMDLEN is the number of elements. */
11527 static tree
11528 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
11530 tree atype = build_array_type_nelts (type, simdlen);
11531 tree avar = create_tmp_var_raw (atype, prefix);
11532 gimple_add_tmp_var (avar);
11533 return avar;
11536 /* Modify the function argument types to their corresponding vector
11537 counterparts if appropriate. Also, create one array for each simd
11538 argument to be used locally when using the function arguments as
11539 part of the loop.
11541 NODE is the function whose arguments are to be adjusted.
11543 Returns an adjustment vector describing how the argument types
11544 have been adjusted. */
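/* E.g. (illustrative) an int parameter "x" with simdlen 8 and a
   128-bit vector size is rewritten into two V4SI parameters and, for
   definitions, paired with a local "int x[8]" simd array built by
   create_tmp_simd_array above.  */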
11546 static ipa_parm_adjustment_vec
11547 simd_clone_adjust_argument_types (struct cgraph_node *node)
11549 vec<tree> args;
11550 ipa_parm_adjustment_vec adjustments;
11552 if (node->definition)
11553 args = ipa_get_vector_of_formal_parms (node->decl);
11554 else
11555 args = simd_clone_vector_of_formal_parm_types (node->decl);
11556 adjustments.create (args.length ());
11557 unsigned i, j, veclen;
11558 struct ipa_parm_adjustment adj;
11559 for (i = 0; i < node->simdclone->nargs; ++i)
11561 memset (&adj, 0, sizeof (adj));
11562 tree parm = args[i];
11563 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
11564 adj.base_index = i;
11565 adj.base = parm;
11567 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
11568 node->simdclone->args[i].orig_type = parm_type;
11570 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
11572 /* No adjustment necessary for scalar arguments. */
11573 adj.op = IPA_PARM_OP_COPY;
11575 else
11577 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
11578 veclen = node->simdclone->vecsize_int;
11579 else
11580 veclen = node->simdclone->vecsize_float;
11581 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
11582 if (veclen > node->simdclone->simdlen)
11583 veclen = node->simdclone->simdlen;
11584 adj.arg_prefix = "simd";
11585 adj.type = build_vector_type (parm_type, veclen);
11586 node->simdclone->args[i].vector_type = adj.type;
11587 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11589 adjustments.safe_push (adj);
11590 if (j == veclen)
11592 memset (&adj, 0, sizeof (adj));
11593 adj.op = IPA_PARM_OP_NEW;
11594 adj.arg_prefix = "simd";
11595 adj.base_index = i;
11596 adj.type = node->simdclone->args[i].vector_type;
11600 if (node->definition)
11601 node->simdclone->args[i].simd_array
11602 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
11603 parm_type, node->simdclone->simdlen);
11605 adjustments.safe_push (adj);
11608 if (node->simdclone->inbranch)
11610 tree base_type
11611 = simd_clone_compute_base_data_type (node->simdclone->origin,
11612 node->simdclone);
11614 memset (&adj, 0, sizeof (adj));
11615 adj.op = IPA_PARM_OP_NEW;
11616 adj.arg_prefix = "mask";
11618 adj.base_index = i;
11619 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
11620 veclen = node->simdclone->vecsize_int;
11621 else
11622 veclen = node->simdclone->vecsize_float;
11623 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
11624 if (veclen > node->simdclone->simdlen)
11625 veclen = node->simdclone->simdlen;
11626 adj.type = build_vector_type (base_type, veclen);
11627 adjustments.safe_push (adj);
11629 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
11630 adjustments.safe_push (adj);
11632 /* We have previously allocated one extra entry for the mask. Use
11633 it and fill it. */
11634 struct cgraph_simd_clone *sc = node->simdclone;
11635 sc->nargs++;
11636 if (node->definition)
11638 sc->args[i].orig_arg
11639 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
11640 sc->args[i].simd_array
11641 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
11643 sc->args[i].orig_type = base_type;
11644 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
11647 if (node->definition)
11648 ipa_modify_formal_parameters (node->decl, adjustments);
11649 else
11651 tree new_arg_types = NULL_TREE, new_reversed;
11652 bool last_parm_void = false;
11653 if (args.length () > 0 && args.last () == void_type_node)
11654 last_parm_void = true;
11656 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
11657 j = adjustments.length ();
11658 for (i = 0; i < j; i++)
11660 struct ipa_parm_adjustment *adj = &adjustments[i];
11661 tree ptype;
11662 if (adj->op == IPA_PARM_OP_COPY)
11663 ptype = args[adj->base_index];
11664 else
11665 ptype = adj->type;
11666 new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
11668 new_reversed = nreverse (new_arg_types);
11669 if (last_parm_void)
11671 if (new_reversed)
11672 TREE_CHAIN (new_arg_types) = void_list_node;
11673 else
11674 new_reversed = void_list_node;
11677 tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
11678 TYPE_ARG_TYPES (new_type) = new_reversed;
11679 TREE_TYPE (node->decl) = new_type;
11681 adjustments.release ();
11683 args.release ();
11684 return adjustments;
11687 /* Initialize and copy the function arguments in NODE to their
11688 corresponding local simd arrays. Returns a fresh gimple_seq with
11689 the statement sequence generated. */
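/* Roughly (illustrative), for the two-V4SI parameter case described
   above this emits at function entry something like

       MEM[&x.simd_array + 0B]  = x_1(D);
       MEM[&x.simd_array + 16B] = x_2(D);

   i.e. each vector argument is stored into its simd array at byte
   offset k * element size.  */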
11691 static gimple_seq
11692 simd_clone_init_simd_arrays (struct cgraph_node *node,
11693 ipa_parm_adjustment_vec adjustments)
11695 gimple_seq seq = NULL;
11696 unsigned i = 0, j = 0, k;
11698 for (tree arg = DECL_ARGUMENTS (node->decl);
11699 arg;
11700 arg = DECL_CHAIN (arg), i++, j++)
11702 if (adjustments[j].op == IPA_PARM_OP_COPY)
11703 continue;
11705 node->simdclone->args[i].vector_arg = arg;
11707 tree array = node->simdclone->args[i].simd_array;
11708 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
11710 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11711 tree ptr = build_fold_addr_expr (array);
11712 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11713 build_int_cst (ptype, 0));
11714 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11715 gimplify_and_add (t, &seq);
11717 else
11719 unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
11720 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
11721 for (k = 0; k < node->simdclone->simdlen; k += simdlen)
11723 tree ptr = build_fold_addr_expr (array);
11724 int elemsize;
11725 if (k)
11727 arg = DECL_CHAIN (arg);
11728 j++;
11730 elemsize
11731 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
11732 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
11733 build_int_cst (ptype, k * elemsize));
11734 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
11735 gimplify_and_add (t, &seq);
11739 return seq;
11742 /* Callback info for ipa_simd_modify_stmt_ops below. */
11744 struct modify_stmt_info {
11745 ipa_parm_adjustment_vec adjustments;
11746 gimple stmt;
11747 /* True if the parent statement was modified by
11748 ipa_simd_modify_stmt_ops. */
11749 bool modified;
11752 /* Callback for walk_gimple_op.
11754 Adjust operands from a given statement as specified in the
11755 adjustments vector in the callback data. */
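/* E.g. (illustrative) a use of the original PARM_DECL "x" becomes a
   use of "x.simd_array[iter]"; taken addresses and type mismatches
   are materialized through new statements inserted before the use.  */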
11757 static tree
11758 ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
11760 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
11761 struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
11762 tree *orig_tp = tp;
11763 if (TREE_CODE (*tp) == ADDR_EXPR)
11764 tp = &TREE_OPERAND (*tp, 0);
11765 struct ipa_parm_adjustment *cand = NULL;
11766 if (TREE_CODE (*tp) == PARM_DECL)
11767 cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
11768 else
11770 if (TYPE_P (*tp))
11771 *walk_subtrees = 0;
11774 tree repl = NULL_TREE;
11775 if (cand)
11776 repl = unshare_expr (cand->new_decl);
11777 else
11779 if (tp != orig_tp)
11781 *walk_subtrees = 0;
11782 bool modified = info->modified;
11783 info->modified = false;
11784 walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
11785 if (!info->modified)
11787 info->modified = modified;
11788 return NULL_TREE;
11790 info->modified = modified;
11791 repl = *tp;
11793 else
11794 return NULL_TREE;
11797 if (tp != orig_tp)
11799 repl = build_fold_addr_expr (repl);
11800 gimple stmt;
11801 if (is_gimple_debug (info->stmt))
11803 tree vexpr = make_node (DEBUG_EXPR_DECL);
11804 stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
11805 DECL_ARTIFICIAL (vexpr) = 1;
11806 TREE_TYPE (vexpr) = TREE_TYPE (repl);
11807 DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
11808 repl = vexpr;
11810 else
11812 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl),
11813 NULL), repl);
11814 repl = gimple_assign_lhs (stmt);
11816 gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
11817 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11818 *orig_tp = repl;
11820 else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
11822 tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
11823 *tp = vce;
11825 else
11826 *tp = repl;
11828 info->modified = true;
11829 return NULL_TREE;
11832 /* Traverse the function body and perform all modifications as
11833 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
11834 modified such that the replacement/reduction value will now be an
11835 offset into the corresponding simd_array.
11837 This function will replace all function argument uses with their
11838 corresponding simd array elements, and adjust the return values
11839 accordingly. */
11841 static void
11842 ipa_simd_modify_function_body (struct cgraph_node *node,
11843 ipa_parm_adjustment_vec adjustments,
11844 tree retval_array, tree iter)
11846 basic_block bb;
11847 unsigned int i, j, l;
11849 /* Re-use the adjustments array, but this time use it to replace
11850 every function argument use with an offset into the corresponding
11851 simd_array. */
11852 for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
11854 if (!node->simdclone->args[i].vector_arg)
11855 continue;
11857 tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
11858 tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
11859 adjustments[j].new_decl
11860 = build4 (ARRAY_REF,
11861 basetype,
11862 node->simdclone->args[i].simd_array,
11863 iter,
11864 NULL_TREE, NULL_TREE);
11865 if (adjustments[j].op == IPA_PARM_OP_NONE
11866 && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
11867 j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
11870 l = adjustments.length ();
11871 for (i = 1; i < num_ssa_names; i++)
11873 tree name = ssa_name (i);
11874 if (name
11875 && SSA_NAME_VAR (name)
11876 && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
11878 for (j = 0; j < l; j++)
11879 if (SSA_NAME_VAR (name) == adjustments[j].base
11880 && adjustments[j].new_decl)
11882 tree base_var;
11883 if (adjustments[j].new_ssa_base == NULL_TREE)
11885 base_var
11886 = copy_var_decl (adjustments[j].base,
11887 DECL_NAME (adjustments[j].base),
11888 TREE_TYPE (adjustments[j].base));
11889 adjustments[j].new_ssa_base = base_var;
11891 else
11892 base_var = adjustments[j].new_ssa_base;
11893 if (SSA_NAME_IS_DEFAULT_DEF (name))
11895 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11896 gimple_stmt_iterator gsi = gsi_after_labels (bb);
11897 tree new_decl = unshare_expr (adjustments[j].new_decl);
11898 set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
11899 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
11900 SSA_NAME_IS_DEFAULT_DEF (name) = 0;
11901 gimple stmt = gimple_build_assign (name, new_decl);
11902 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11904 else
11905 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
11910 struct modify_stmt_info info;
11911 info.adjustments = adjustments;
11913 FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
11915 gimple_stmt_iterator gsi;
11917 gsi = gsi_start_bb (bb);
11918 while (!gsi_end_p (gsi))
11920 gimple stmt = gsi_stmt (gsi);
11921 info.stmt = stmt;
11922 struct walk_stmt_info wi;
11924 memset (&wi, 0, sizeof (wi));
11925 info.modified = false;
11926 wi.info = &info;
11927 walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
11929 if (gimple_code (stmt) == GIMPLE_RETURN)
11931 tree retval = gimple_return_retval (stmt);
11932 if (!retval)
11934 gsi_remove (&gsi, true);
11935 continue;
11938 /* Replace `return foo' with `retval_array[iter] = foo'. */
11939 tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
11940 retval_array, iter, NULL, NULL);
11941 stmt = gimple_build_assign (ref, retval);
11942 gsi_replace (&gsi, stmt, true);
11943 info.modified = true;
11946 if (info.modified)
11948 update_stmt (stmt);
11949 if (maybe_clean_eh_stmt (stmt))
11950 gimple_purge_dead_eh_edges (gimple_bb (stmt));
11952 gsi_next (&gsi);
11957 /* Adjust NODE's return and argument types to their vector
11958 counterparts, and wrap its body in a loop over the simd lanes. */
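/* The rewritten function has, roughly, this shape (illustrative):

     entry_bb:     simd arrays and iv initialized
     body_bb:      iter1 = PHI <0 (entry_bb), iter2 (latch_bb)>
                   ... original body, uses rewritten to array refs ...
     incr_bb:      iter2 = iter1 + 1;
                   if (iter2 < simdlen) goto latch_bb; else new_exit_bb
     latch_bb:     goto body_bb
     new_exit_bb:  return

   and is annotated below as a force_vectorize loop with
   safelen == simdlen.  */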
11960 static void
11961 simd_clone_adjust (struct cgraph_node *node)
11963 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
11965 targetm.simd_clone.adjust (node);
11967 tree retval = simd_clone_adjust_return_type (node);
11968 ipa_parm_adjustment_vec adjustments
11969 = simd_clone_adjust_argument_types (node);
11971 push_gimplify_context ();
11973 gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
11975 /* Adjust all uses of vector arguments accordingly. Adjust all
11976 return values accordingly. */
11977 tree iter = create_tmp_var (unsigned_type_node, "iter");
11978 tree iter1 = make_ssa_name (iter, NULL);
11979 tree iter2 = make_ssa_name (iter, NULL);
11980 ipa_simd_modify_function_body (node, adjustments, retval, iter1);
11982 /* Initialize the iteration variable. */
11983 basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
11984 basic_block body_bb = split_block_after_labels (entry_bb)->dest;
11985 gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
11986 /* Insert the SIMD array and iv initialization at function
11987 entry. */
11988 gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
11990 pop_gimplify_context (NULL);
11992 /* Create a new BB right before the original exit BB, to hold the
11993 iteration increment and the condition/branch. */
11994 basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
11995 basic_block incr_bb = create_empty_bb (orig_exit);
11996 add_bb_to_loop (incr_bb, body_bb->loop_father);
11997 /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with no
11998 edge flags set. Make it a FALLTHRU_EDGE now. */
11999 gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
12000 EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
12001 for (unsigned i = 0;
12002 i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
12004 edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
12005 redirect_edge_succ (e, incr_bb);
12007 edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
12008 e->probability = REG_BR_PROB_BASE;
12009 gsi = gsi_last_bb (incr_bb);
12010 gimple g = gimple_build_assign_with_ops (PLUS_EXPR, iter2, iter1,
12011 build_int_cst (unsigned_type_node,
12012 1));
12013 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12015 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
12016 struct loop *loop = alloc_loop ();
12017 cfun->has_force_vectorize_loops = true;
12018 loop->safelen = node->simdclone->simdlen;
12019 loop->force_vectorize = true;
12020 loop->header = body_bb;
12022 /* Branch around the body if the mask applies. */
12023 if (node->simdclone->inbranch)
12025 gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
12026 tree mask_array
12027 = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
12028 tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)), NULL);
12029 tree aref = build4 (ARRAY_REF,
12030 TREE_TYPE (TREE_TYPE (mask_array)),
12031 mask_array, iter1,
12032 NULL, NULL);
12033 g = gimple_build_assign (mask, aref);
12034 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12035 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
12036 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
12038 aref = build1 (VIEW_CONVERT_EXPR,
12039 build_nonstandard_integer_type (bitsize, 0), mask);
12040 mask = make_ssa_name (TREE_TYPE (aref), NULL);
12041 g = gimple_build_assign (mask, aref);
12042 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12045 g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
12046 NULL, NULL);
12047 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12048 make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
12049 FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
12052 /* Generate the condition. */
12053 g = gimple_build_cond (LT_EXPR,
12054 iter2,
12055 build_int_cst (unsigned_type_node,
12056 node->simdclone->simdlen),
12057 NULL, NULL);
12058 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12059 e = split_block (incr_bb, gsi_stmt (gsi));
12060 basic_block latch_bb = e->dest;
12061 basic_block new_exit_bb;
12062 new_exit_bb = split_block (latch_bb, NULL)->dest;
12063 loop->latch = latch_bb;
12065 redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
12067 make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
12068 /* The successor of incr_bb is already pointing to latch_bb; just
12069 change the flags.
12070 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
12071 FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
12073 gimple phi = create_phi_node (iter1, body_bb);
12074 edge preheader_edge = find_edge (entry_bb, body_bb);
12075 edge latch_edge = single_succ_edge (latch_bb);
12076 add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
12077 UNKNOWN_LOCATION);
12078 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12080 /* Generate the new return. */
12081 gsi = gsi_last_bb (new_exit_bb);
12082 if (retval
12083 && TREE_CODE (retval) == VIEW_CONVERT_EXPR
12084 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
12085 retval = TREE_OPERAND (retval, 0);
12086 else if (retval)
12088 retval = build1 (VIEW_CONVERT_EXPR,
12089 TREE_TYPE (TREE_TYPE (node->decl)),
12090 retval);
12091 retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
12092 false, GSI_CONTINUE_LINKING);
12094 g = gimple_build_return (retval);
12095 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
12097 /* Handle aligned clauses by replacing default defs of the aligned
12098 uniform args with __builtin_assume_aligned (arg_N(D), alignment)
12099 lhs. Handle linear by adding PHIs. */
12100 for (unsigned i = 0; i < node->simdclone->nargs; i++)
12101 if (node->simdclone->args[i].alignment
12102 && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
12103 && (node->simdclone->args[i].alignment
12104 & (node->simdclone->args[i].alignment - 1)) == 0
12105 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
12106 == POINTER_TYPE)
12108 unsigned int alignment = node->simdclone->args[i].alignment;
12109 tree orig_arg = node->simdclone->args[i].orig_arg;
12110 tree def = ssa_default_def (cfun, orig_arg);
12111 if (def && !has_zero_uses (def))
12113 tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
12114 gimple_seq seq = NULL;
12115 bool need_cvt = false;
12116 gimple call
12117 = gimple_build_call (fn, 2, def, size_int (alignment));
12118 g = call;
12119 if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
12120 ptr_type_node))
12121 need_cvt = true;
12122 tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg, NULL);
12123 gimple_call_set_lhs (g, t);
12124 gimple_seq_add_stmt_without_update (&seq, g);
12125 if (need_cvt)
12127 t = make_ssa_name (orig_arg, NULL);
12128 g = gimple_build_assign_with_ops (NOP_EXPR, t,
12129 gimple_call_lhs (g),
12130 NULL_TREE);
12131 gimple_seq_add_stmt_without_update (&seq, g);
12133 gsi_insert_seq_on_edge_immediate
12134 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
12136 entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
12137 int freq = compute_call_stmt_bb_frequency (current_function_decl,
12138 entry_bb);
12139 node->create_edge (cgraph_node::get_create (fn),
12140 call, entry_bb->count, freq);
12142 imm_use_iterator iter;
12143 use_operand_p use_p;
12144 gimple use_stmt;
12145 tree repl = gimple_get_lhs (g);
12146 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12147 if (is_gimple_debug (use_stmt) || use_stmt == call)
12148 continue;
12149 else
12150 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12151 SET_USE (use_p, repl);
12154 else if (node->simdclone->args[i].arg_type
12155 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
12157 tree orig_arg = node->simdclone->args[i].orig_arg;
12158 tree def = ssa_default_def (cfun, orig_arg);
12159 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12160 || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
12161 if (def && !has_zero_uses (def))
12163 iter1 = make_ssa_name (orig_arg, NULL);
12164 iter2 = make_ssa_name (orig_arg, NULL);
12165 phi = create_phi_node (iter1, body_bb);
12166 add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
12167 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
12168 enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12169 ? PLUS_EXPR : POINTER_PLUS_EXPR;
12170 tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
12171 ? TREE_TYPE (orig_arg) : sizetype;
12172 tree addcst
12173 = build_int_cst (addtype, node->simdclone->args[i].linear_step);
12174 g = gimple_build_assign_with_ops (code, iter2, iter1, addcst);
12175 gsi = gsi_last_bb (incr_bb);
12176 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
12178 imm_use_iterator iter;
12179 use_operand_p use_p;
12180 gimple use_stmt;
12181 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
12182 if (use_stmt == phi)
12183 continue;
12184 else
12185 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
12186 SET_USE (use_p, iter1);
12190 calculate_dominance_info (CDI_DOMINATORS);
12191 add_loop (loop, loop->header->loop_father);
12192 update_ssa (TODO_update_ssa);
12194 pop_cfun ();
12197 /* If the function in NODE is tagged as an elemental SIMD function,
12198 create the appropriate SIMD clones. */
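/* E.g. (illustrative) a plain "#pragma omp declare simd" on a defined
   function, on a target reporting two ISA variants, yields four
   clones: one inbranch and one notinbranch clone per ISA.  */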
12200 static void
12201 expand_simd_clones (struct cgraph_node *node)
12203 tree attr = lookup_attribute ("omp declare simd",
12204 DECL_ATTRIBUTES (node->decl));
12205 if (attr == NULL_TREE
12206 || node->global.inlined_to
12207 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
12208 return;
12210 /* Ignore
12211 #pragma omp declare simd
12212 extern int foo ();
12213 in C, where we don't know the argument types at all. */
12214 if (!node->definition
12215 && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
12216 return;
12218 do
12220 /* Start with parsing the "omp declare simd" attribute(s). */
12221 bool inbranch_clause_specified;
12222 struct cgraph_simd_clone *clone_info
12223 = simd_clone_clauses_extract (node, TREE_VALUE (attr),
12224 &inbranch_clause_specified);
12225 if (clone_info == NULL)
12226 continue;
12228 int orig_simdlen = clone_info->simdlen;
12229 tree base_type = simd_clone_compute_base_data_type (node, clone_info);
12230 /* The target can return 0 (no simd clones should be created),
12231 1 (just one ISA of simd clones should be created) or a higher
12232 count of ISA variants. In the latter case, clone_info is initialized
12233 for the first ISA variant. */
12234 int count
12235 = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
12236 base_type, 0);
12237 if (count == 0)
12238 continue;
12240 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
12241 also create one inbranch and one !inbranch clone of it. */
12242 for (int i = 0; i < count * 2; i++)
12244 struct cgraph_simd_clone *clone = clone_info;
12245 if (inbranch_clause_specified && (i & 1) != 0)
12246 continue;
12248 if (i != 0)
12250 clone = simd_clone_struct_alloc (clone_info->nargs
12251 + ((i & 1) != 0));
12252 simd_clone_struct_copy (clone, clone_info);
12253 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
12254 and simd_clone_adjust_argument_types did to the first
12255 clone's info. */
12256 clone->nargs -= clone_info->inbranch;
12257 clone->simdlen = orig_simdlen;
12258 /* And call the target hook again to get the right ISA. */
12259 targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
12260 base_type,
12261 i / 2);
12262 if ((i & 1) != 0)
12263 clone->inbranch = 1;
12266 /* simd_clone_mangle might fail if such a clone has been created
12267 already. */
12268 tree id = simd_clone_mangle (node, clone);
12269 if (id == NULL_TREE)
12270 continue;
12272 /* Only when we are sure we want to create the clone do we actually
12273 clone the function (for definitions) or create another extern
12274 FUNCTION_DECL (for prototypes without definitions). */
12275 struct cgraph_node *n = simd_clone_create (node);
12276 if (n == NULL)
12277 continue;
12279 n->simdclone = clone;
12280 clone->origin = node;
12281 clone->next_clone = NULL;
12282 if (node->simd_clones == NULL)
12284 clone->prev_clone = n;
12285 node->simd_clones = n;
12287 else
12289 clone->prev_clone = node->simd_clones->simdclone->prev_clone;
12290 clone->prev_clone->simdclone->next_clone = n;
12291 node->simd_clones->simdclone->prev_clone = n;
12293 symtab->change_decl_assembler_name (n->decl, id);
12294 /* And finally adjust the return type and parameters and, for
12295 definitions, also the function body. */
12296 if (node->definition)
12297 simd_clone_adjust (n);
12298 else
12300 simd_clone_adjust_return_type (n);
12301 simd_clone_adjust_argument_types (n);
12305 while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
12308 /* Entry point for IPA simd clone creation pass. */
12310 static unsigned int
12311 ipa_omp_simd_clone (void)
12313 struct cgraph_node *node;
12314 FOR_EACH_FUNCTION (node)
12315 expand_simd_clones (node);
12316 return 0;
12319 namespace {
12321 const pass_data pass_data_omp_simd_clone =
12323 SIMPLE_IPA_PASS, /* type */
12324 "simdclone", /* name */
12325 OPTGROUP_NONE, /* optinfo_flags */
12326 TV_NONE, /* tv_id */
12327 ( PROP_ssa | PROP_cfg ), /* properties_required */
12328 0, /* properties_provided */
12329 0, /* properties_destroyed */
12330 0, /* todo_flags_start */
12331 0, /* todo_flags_finish */
12334 class pass_omp_simd_clone : public simple_ipa_opt_pass
12336 public:
12337 pass_omp_simd_clone (gcc::context *ctxt)
12338 : simple_ipa_opt_pass (pass_data_omp_simd_clone, ctxt)
12341 /* opt_pass methods: */
12342 virtual bool gate (function *);
12343 virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
12346 bool
12347 pass_omp_simd_clone::gate (function *)
12349 return ((flag_openmp || flag_openmp_simd
12350 || flag_cilkplus
12351 || (in_lto_p && !flag_wpa))
12352 && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
12355 } // anon namespace
12357 simple_ipa_opt_pass *
12358 make_pass_omp_simd_clone (gcc::context *ctxt)
12360 return new pass_omp_simd_clone (ctxt);
12363 #include "gt-omp-low.h"