/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

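/* As a rough illustration only (not the exact GIMPLE this pass emits),
   a directive such as

	#pragma omp parallel shared (x)
	  x = x + 1;

   is conceptually turned into marshalling through a .omp_data_s record
   plus calls into libgomp:

	struct .omp_data_s { int *x; } .omp_data_o;
	.omp_data_o.x = &x;
	GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();

   where foo._omp_fn.0 is the outlined child function created by this
   pass; the child reaches the shared variable through .omp_data_i->x.
   The names above are invented for illustration.  */
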
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
				PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
				MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
				NEGATE_EXPR, TREE_TYPE (loop->step),
				loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				PLUS_EXPR, TREE_TYPE (loop->v),
				loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
				PLUS_EXPR, TREE_TYPE (loop->v),
				loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
				MINUS_EXPR, TREE_TYPE (loop->v),
				loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
			PLUS_EXPR, itype,
			fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
			fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
			fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
			fold_build1_loc (loc, NEGATE_EXPR, itype, t),
			fold_build1_loc (loc, NEGATE_EXPR, itype,
				fold_convert_loc (loc, itype,
						  loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
			fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
			MULT_EXPR, long_long_unsigned_type_node,
			count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}

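/* As a hedged illustration of what the extraction above produces: for
   a simple non-collapsed loop

	#pragma omp for schedule (dynamic, 4)
	for (i = 0; i < n; i++)
	  ...

   *FD would roughly contain

	fd->loop.v = i, fd->loop.n1 = 0, fd->loop.n2 = n,
	fd->loop.step = 1, fd->loop.cond_code = LT_EXPR,
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, fd->chunk_size = 4,
	fd->collapse = 1, fd->iter_type = long_integer_type_node.

   This is a sketch of the intended contents, not a dump of actual
   compiler output.  */
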
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

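/* Sketch of why the extra arguments matter: when a region such as

	#pragma omp parallel for schedule (dynamic, 4)
	for (i = 0; i < n; i++)
	  ...

   is expanded as a combined region, libgomp is entered through a single
   combined call along the lines of

	GOMP_parallel_loop_dynamic_start (fn, data, 0, 0, n, 1, 4);

   so the loop bounds, step and chunk size collected by get_ws_args_for
   below must be computable at the parallel call site.  The runtime
   entry point and argument order shown here are illustrative, not a
   specification.  */
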
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}

/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}

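/* For instance, under the rules above

	#pragma omp parallel for schedule (static)
	for (i = 0; i < n; i++) ...

   or a loop carrying an ordered clause keeps the separate
   parallel/workshare expansion, whereas

	#pragma omp parallel for schedule (guided)
	for (i = 0; i < n; i++) ...

   is marked is_combined_parallel and later expanded through a single
   combined libgomp entry point.  (Illustrative examples only.)  */
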
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

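/* To make the distinction concrete (a sketch, not actual compiler
   output): for

	int n = 10;	// scalar, not addressable
	int a[100];	// aggregate
	#pragma omp parallel shared (n, a)

   the sender record ends up roughly as

	struct .omp_data_s { int n; int (*a)[100]; };

   N can use copy-in/copy-out by value, while A, being an aggregate,
   is caught by the AGGREGATE_TYPE_P test above and is always passed
   by pointer.  */
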
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

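/* A note on the MASK argument of install_var_field (an informal
   summary, not original interface documentation): bit 0 requests a
   field in record_type (the block passed to the child function), bit 1
   one in srecord_type (the block handed to the task firstprivate fn).
   Typical calls elsewhere in this file are

	install_var_field (decl, by_ref, 3, ctx);	// both records
	install_var_field (decl, false, 1, ctx);	// record_type only
	install_var_field (decl, by_ref, 2, ctx);	// srecord_type only

   with BY_REF selecting a pointer-typed field.  */
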
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

1355 specified by CLAUSES. */
1358 scan_sharing_clauses (tree clauses
, omp_context
*ctx
)
1361 bool scan_array_reductions
= false;
1363 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1367 switch (OMP_CLAUSE_CODE (c
))
1369 case OMP_CLAUSE_PRIVATE
:
1370 decl
= OMP_CLAUSE_DECL (c
);
1371 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
1373 else if (!is_variable_sized (decl
))
1374 install_var_local (decl
, ctx
);
1377 case OMP_CLAUSE_SHARED
:
1378 gcc_assert (is_taskreg_ctx (ctx
));
1379 decl
= OMP_CLAUSE_DECL (c
);
1380 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl
))
1381 || !is_variable_sized (decl
));
1382 /* Global variables don't need to be copied,
1383 the receiver side will use them directly. */
1384 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
)))
1386 by_ref
= use_pointer_for_field (decl
, ctx
);
1387 if (! TREE_READONLY (decl
)
1388 || TREE_ADDRESSABLE (decl
)
1390 || is_reference (decl
))
1392 install_var_field (decl
, by_ref
, 3, ctx
);
1393 install_var_local (decl
, ctx
);
1396 /* We don't need to copy const scalar vars back. */
1397 OMP_CLAUSE_SET_CODE (c
, OMP_CLAUSE_FIRSTPRIVATE
);
1400 case OMP_CLAUSE_LASTPRIVATE
:
1401 /* Let the corresponding firstprivate clause create
1403 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
1407 case OMP_CLAUSE_FIRSTPRIVATE
:
1408 case OMP_CLAUSE_REDUCTION
:
1409 decl
= OMP_CLAUSE_DECL (c
);
1411 if (is_variable_sized (decl
))
1413 if (is_task_ctx (ctx
))
1414 install_var_field (decl
, false, 1, ctx
);
1417 else if (is_taskreg_ctx (ctx
))
1420 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
));
1421 by_ref
= use_pointer_for_field (decl
, NULL
);
1423 if (is_task_ctx (ctx
)
1424 && (global
|| by_ref
|| is_reference (decl
)))
1426 install_var_field (decl
, false, 1, ctx
);
1428 install_var_field (decl
, by_ref
, 2, ctx
);
1431 install_var_field (decl
, by_ref
, 3, ctx
);
1433 install_var_local (decl
, ctx
);
1436 case OMP_CLAUSE_COPYPRIVATE
:
1437 case OMP_CLAUSE_COPYIN
:
1438 decl
= OMP_CLAUSE_DECL (c
);
1439 by_ref
= use_pointer_for_field (decl
, NULL
);
1440 install_var_field (decl
, by_ref
, 3, ctx
);
1443 case OMP_CLAUSE_DEFAULT
:
1444 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_KIND (c
);
1447 case OMP_CLAUSE_FINAL
:
1449 case OMP_CLAUSE_NUM_THREADS
:
1450 case OMP_CLAUSE_SCHEDULE
:
1452 scan_omp_op (&OMP_CLAUSE_OPERAND (c
, 0), ctx
->outer
);
1455 case OMP_CLAUSE_NOWAIT
:
1456 case OMP_CLAUSE_ORDERED
:
1457 case OMP_CLAUSE_COLLAPSE
:
1458 case OMP_CLAUSE_UNTIED
:
1459 case OMP_CLAUSE_MERGEABLE
:
1467 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1469 switch (OMP_CLAUSE_CODE (c
))
1471 case OMP_CLAUSE_LASTPRIVATE
:
1472 /* Let the corresponding firstprivate clause create
1474 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
1475 scan_array_reductions
= true;
1476 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
1480 case OMP_CLAUSE_PRIVATE
:
1481 case OMP_CLAUSE_FIRSTPRIVATE
:
1482 case OMP_CLAUSE_REDUCTION
:
1483 decl
= OMP_CLAUSE_DECL (c
);
1484 if (is_variable_sized (decl
))
1485 install_var_local (decl
, ctx
);
1486 fixup_remapped_decl (decl
, ctx
,
1487 OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_PRIVATE
1488 && OMP_CLAUSE_PRIVATE_DEBUG (c
));
1489 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
1490 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
1491 scan_array_reductions
= true;
1494 case OMP_CLAUSE_SHARED
:
1495 decl
= OMP_CLAUSE_DECL (c
);
1496 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl
, ctx
)))
1497 fixup_remapped_decl (decl
, ctx
, false);
1500 case OMP_CLAUSE_COPYPRIVATE
:
1501 case OMP_CLAUSE_COPYIN
:
1502 case OMP_CLAUSE_DEFAULT
:
1504 case OMP_CLAUSE_NUM_THREADS
:
1505 case OMP_CLAUSE_SCHEDULE
:
1506 case OMP_CLAUSE_NOWAIT
:
1507 case OMP_CLAUSE_ORDERED
:
1508 case OMP_CLAUSE_COLLAPSE
:
1509 case OMP_CLAUSE_UNTIED
:
1510 case OMP_CLAUSE_FINAL
:
1511 case OMP_CLAUSE_MERGEABLE
:
1519 if (scan_array_reductions
)
1520 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
1521 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_REDUCTION
1522 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
1524 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
), ctx
);
1525 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
), ctx
);
1527 else if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
1528 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
1529 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
), ctx
);
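/* Rough illustration of the two passes above (variable names made up):
   given

	#pragma omp parallel shared (s) firstprivate (f) private (p)

   the first pass installs fields and local replacements,

	install_var_field (s, ..., 3, ctx);  install_var_local (s, ctx);
	install_var_field (f, ..., 3, ctx);  install_var_local (f, ctx);
	install_var_local (p, ctx);

   and the second pass runs fixup_remapped_decl on each replacement so
   sizes and DECL_VALUE_EXPRs refer to the child function's copies.  */
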
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}

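/* The resulting child function therefore has a signature along the
   lines of (invented name, for illustration only)

	void foo._omp_fn.0 (void *.omp_data_i);

   for a parallel or task body, or two pointer arguments
   (.omp_data_o, .omp_data_i) for a task copy function.  .omp_data_i
   is recorded as ctx->receiver_decl and is what build_receiver_ref
   dereferences to reach the marshalled variables.  */
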
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
		return;
	      }
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
			"task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	    return;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}

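/* For example, the checks above diagnose a closely nested barrier:

	#pragma omp parallel
	{
	  #pragma omp for
	  for (i = 0; i < n; i++)
	    {
	      ...
	      #pragma omp barrier	// warned: barrier region may not be
	    }				// closely nested inside of work-sharing ...
	}

   (Illustrative source only; the barrier reaches this code as a call
   to the GOMP_barrier built-in, which is why GIMPLE_CALL is handled
   together with the work-sharing codes.)  */
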
/* Helper function scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = build_int_cst_wide (tem,
					      TREE_INT_CST_LOW (t),
					      TREE_INT_CST_HIGH (t));
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}


/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
	check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    check_omp_nesting_restrictions (stmt, ctx);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}


/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
}

2066 /* If a context was created for STMT when it was scanned, return it. */
2068 static omp_context
*
2069 maybe_lookup_ctx (gimple stmt
)
2072 n
= splay_tree_lookup (all_contexts
, (splay_tree_key
) stmt
);
2073 return n
? (omp_context
*) n
->value
: NULL
;
2077 /* Find the mapping for DECL in CTX or the immediately enclosing
2078 context that has a mapping for DECL.
2080 If CTX is a nested parallel directive, we may have to use the decl
2081 mappings created in CTX's parent context. Suppose that we have the
2082 following parallel nesting (variable UIDs showed for clarity):
2085 #omp parallel shared(iD.1562) -> outer parallel
2086 iD.1562 = iD.1562 + 1;
2088 #omp parallel shared (iD.1562) -> inner parallel
2089 iD.1562 = iD.1562 - 1;
2091 Each parallel structure will create a distinct .omp_data_s structure
2092 for copying iD.1562 in/out of the directive:
2094 outer parallel .omp_data_s.1.i -> iD.1562
2095 inner parallel .omp_data_s.2.i -> iD.1562
2097 A shared variable mapping will produce a copy-out operation before
2098 the parallel directive and a copy-in operation after it. So, in
2099 this case we would have:
2102 .omp_data_o.1.i = iD.1562;
2103 #omp parallel shared(iD.1562) -> outer parallel
2104 .omp_data_i.1 = &.omp_data_o.1
2105 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2107 .omp_data_o.2.i = iD.1562; -> **
2108 #omp parallel shared(iD.1562) -> inner parallel
2109 .omp_data_i.2 = &.omp_data_o.2
2110 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2113 ** This is a problem. The symbol iD.1562 cannot be referenced
2114 inside the body of the outer parallel region. But since we are
2115 emitting this copy operation while expanding the inner parallel
2116 directive, we need to access the CTX structure of the outer
2117 parallel directive to get the correct mapping:
2119 .omp_data_o.2.i = .omp_data_i.1->i
2121 Since there may be other workshare or parallel directives enclosing
2122 the parallel directive, it may be necessary to walk up the context
2123 parent chain. This is not a problem in general because nested
2124 parallelism happens only rarely. */
2127 lookup_decl_in_outer_ctx (tree decl
, omp_context
*ctx
)
2132 for (up
= ctx
->outer
, t
= NULL
; up
&& t
== NULL
; up
= up
->outer
)
2133 t
= maybe_lookup_decl (decl
, up
);
2135 gcc_assert (!ctx
->is_nested
|| t
|| is_global_var (decl
));
2137 return t
? t
: decl
;
2141 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2142 in outer contexts. */
2145 maybe_lookup_decl_in_outer_ctx (tree decl
, omp_context
*ctx
)
2150 for (up
= ctx
->outer
, t
= NULL
; up
&& t
== NULL
; up
= up
->outer
)
2151 t
= maybe_lookup_decl (decl
, up
);
2153 return t
? t
: decl
;
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);

  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
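
/* For instance, for a clause such as

	#pragma omp parallel for reduction (+:sum) reduction (*:prod)

   each thread's private SUM starts at 0 (the identity of +) and its
   private PROD at 1 (the identity of *), so that merging the partial
   results with the reduction operator gives the same answer as the
   sequential loop.  */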
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
			 omp_context *ctx)
{
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;
	  location_t clause_loc = OMP_CLAUSE_LOCATION (c);

	  switch (c_kind)
	    {
2258 case OMP_CLAUSE_PRIVATE
:
2259 if (OMP_CLAUSE_PRIVATE_DEBUG (c
))
2262 case OMP_CLAUSE_SHARED
:
2263 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c
), ctx
) == NULL
)
2265 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c
)));
2268 case OMP_CLAUSE_FIRSTPRIVATE
:
2269 case OMP_CLAUSE_COPYIN
:
2270 case OMP_CLAUSE_REDUCTION
:
2272 case OMP_CLAUSE_LASTPRIVATE
:
2273 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2275 lastprivate_firstprivate
= true;
2284 new_var
= var
= OMP_CLAUSE_DECL (c
);
2285 if (c_kind
!= OMP_CLAUSE_COPYIN
)
2286 new_var
= lookup_decl (var
, ctx
);
2288 if (c_kind
== OMP_CLAUSE_SHARED
|| c_kind
== OMP_CLAUSE_COPYIN
)
2293 else if (is_variable_sized (var
))
2295 /* For variable sized types, we need to allocate the
2296 actual storage here. Call alloca and store the
2297 result in the pointer decl that we created elsewhere. */
2301 if (c_kind
!= OMP_CLAUSE_FIRSTPRIVATE
|| !is_task_ctx (ctx
))
2306 ptr
= DECL_VALUE_EXPR (new_var
);
2307 gcc_assert (TREE_CODE (ptr
) == INDIRECT_REF
);
2308 ptr
= TREE_OPERAND (ptr
, 0);
2309 gcc_assert (DECL_P (ptr
));
2310 x
= TYPE_SIZE_UNIT (TREE_TYPE (new_var
));
2312 /* void *tmp = __builtin_alloca */
2313 atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
2314 stmt
= gimple_build_call (atmp
, 1, x
);
2315 tmp
= create_tmp_var_raw (ptr_type_node
, NULL
);
2316 gimple_add_tmp_var (tmp
);
2317 gimple_call_set_lhs (stmt
, tmp
);
2319 gimple_seq_add_stmt (ilist
, stmt
);
2321 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ptr
), tmp
);
2322 gimplify_assign (ptr
, x
, ilist
);
2325 else if (is_reference (var
))
2327 /* For references that are being privatized for Fortran,
2328 allocate new backing storage for the new pointer
2329 variable. This allows us to avoid changing all the
2330 code that expects a pointer to something that expects
2331 a direct variable. Note that this doesn't apply to
2332 C++, since reference types are disallowed in data
2333 sharing clauses there, except for NRV optimized
2338 x
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var
)));
2339 if (c_kind
== OMP_CLAUSE_FIRSTPRIVATE
&& is_task_ctx (ctx
))
2341 x
= build_receiver_ref (var
, false, ctx
);
2342 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2344 else if (TREE_CONSTANT (x
))
2346 const char *name
= NULL
;
2347 if (DECL_NAME (var
))
2348 name
= IDENTIFIER_POINTER (DECL_NAME (new_var
));
2350 x
= create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var
)),
2352 gimple_add_tmp_var (x
);
2353 TREE_ADDRESSABLE (x
) = 1;
2354 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2358 tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA
);
2359 x
= build_call_expr_loc (clause_loc
, atmp
, 1, x
);
2362 x
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), x
);
2363 gimplify_assign (new_var
, x
, ilist
);
2365 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2367 else if (c_kind
== OMP_CLAUSE_REDUCTION
2368 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2376 switch (OMP_CLAUSE_CODE (c
))
2378 case OMP_CLAUSE_SHARED
:
2379 /* Shared global vars are just accessed directly. */
2380 if (is_global_var (new_var
))
2382 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2383 needs to be delayed until after fixup_child_record_type so
2384 that we get the correct type during the dereference. */
2385 by_ref
= use_pointer_for_field (var
, ctx
);
2386 x
= build_receiver_ref (var
, by_ref
, ctx
);
2387 SET_DECL_VALUE_EXPR (new_var
, x
);
2388 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
2390 /* ??? If VAR is not passed by reference, and the variable
2391 hasn't been initialized yet, then we'll get a warning for
2392 the store into the omp_data_s structure. Ideally, we'd be
2393 able to notice this and not store anything at all, but
2394 we're generating code too early. Suppress the warning. */
2396 TREE_NO_WARNING (var
) = 1;
2399 case OMP_CLAUSE_LASTPRIVATE
:
2400 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2404 case OMP_CLAUSE_PRIVATE
:
2405 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_PRIVATE
)
2406 x
= build_outer_var_ref (var
, ctx
);
2407 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
2409 if (is_task_ctx (ctx
))
2410 x
= build_receiver_ref (var
, false, ctx
);
2412 x
= build_outer_var_ref (var
, ctx
);
2416 x
= lang_hooks
.decls
.omp_clause_default_ctor (c
, new_var
, x
);
2418 gimplify_and_add (x
, ilist
);
2422 x
= lang_hooks
.decls
.omp_clause_dtor (c
, new_var
);
2425 gimple_seq tseq
= NULL
;
2428 gimplify_stmt (&dtor
, &tseq
);
2429 gsi_insert_seq_before (&diter
, tseq
, GSI_SAME_STMT
);
2433 case OMP_CLAUSE_FIRSTPRIVATE
:
2434 if (is_task_ctx (ctx
))
2436 if (is_reference (var
) || is_variable_sized (var
))
2438 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var
,
2440 || use_pointer_for_field (var
, NULL
))
2442 x
= build_receiver_ref (var
, false, ctx
);
2443 SET_DECL_VALUE_EXPR (new_var
, x
);
2444 DECL_HAS_VALUE_EXPR_P (new_var
) = 1;
2448 x
= build_outer_var_ref (var
, ctx
);
2449 x
= lang_hooks
.decls
.omp_clause_copy_ctor (c
, new_var
, x
);
2450 gimplify_and_add (x
, ilist
);
2454 case OMP_CLAUSE_COPYIN
:
2455 by_ref
= use_pointer_for_field (var
, NULL
);
2456 x
= build_receiver_ref (var
, by_ref
, ctx
);
2457 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, x
);
2458 append_to_statement_list (x
, ©in_seq
);
2459 copyin_by_ref
|= by_ref
;
2462 case OMP_CLAUSE_REDUCTION
:
2463 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2465 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
2466 x
= build_outer_var_ref (var
, ctx
);
2468 if (is_reference (var
))
2469 x
= build_fold_addr_expr_loc (clause_loc
, x
);
2470 SET_DECL_VALUE_EXPR (placeholder
, x
);
2471 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
2472 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
), ctx
);
2473 gimple_seq_add_seq (ilist
,
2474 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
));
2475 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c
) = NULL
;
2476 DECL_HAS_VALUE_EXPR_P (placeholder
) = 0;
2480 x
= omp_reduction_init (c
, TREE_TYPE (new_var
));
2481 gcc_assert (TREE_CODE (TREE_TYPE (new_var
)) != ARRAY_TYPE
);
2482 gimplify_assign (new_var
, x
, ilist
);
  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
			   0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
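
/* As a rough sketch (the decl names here are made up for illustration),
   for

	int x = 1;
	#pragma omp parallel firstprivate (x)
	  use (x);

   the receiver side of the child function starts with something like

	x.priv = .omp_data_i->x;	<- firstprivate copy-in

   while a copyin clause on a threadprivate variable instead produces

	if (__builtin_omp_get_thread_num () != 0)
	  x = .omp_data_i->x;		<- skipped in the master thread to
					   avoid a self-copy

   followed, when needed, by the GOMP_barrier call added just above.  */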
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
			   omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;
  gimple stmt;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
      par_clauses = true;
    }

  if (predicate)
    {
2550 tree label_true
, arm1
, arm2
;
2552 label
= create_artificial_label (UNKNOWN_LOCATION
);
2553 label_true
= create_artificial_label (UNKNOWN_LOCATION
);
2554 arm1
= TREE_OPERAND (predicate
, 0);
2555 arm2
= TREE_OPERAND (predicate
, 1);
2556 gimplify_expr (&arm1
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
2557 gimplify_expr (&arm2
, stmt_list
, NULL
, is_gimple_val
, fb_rvalue
);
2558 stmt
= gimple_build_cond (TREE_CODE (predicate
), arm1
, arm2
,
2560 gimple_seq_add_stmt (stmt_list
, stmt
);
2561 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label_true
));
2564 for (c
= clauses
; c
;)
2567 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2569 if (OMP_CLAUSE_CODE (c
) == OMP_CLAUSE_LASTPRIVATE
)
2571 var
= OMP_CLAUSE_DECL (c
);
2572 new_var
= lookup_decl (var
, ctx
);
2574 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
))
2576 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
), ctx
);
2577 gimple_seq_add_seq (stmt_list
,
2578 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
));
2580 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c
) = NULL
;
2582 x
= build_outer_var_ref (var
, ctx
);
2583 if (is_reference (var
))
2584 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2585 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, x
, new_var
);
2586 gimplify_and_add (x
, stmt_list
);
2588 c
= OMP_CLAUSE_CHAIN (c
);
2589 if (c
== NULL
&& !par_clauses
)
2591 /* If this was a workshare clause, see if it had been combined
2592 with its parallel. In that case, continue looking for the
2593 clauses also on the parallel statement itself. */
2594 if (is_parallel_ctx (ctx
))
2598 if (ctx
== NULL
|| !is_parallel_ctx (ctx
))
2601 c
= find_omp_clause (gimple_omp_parallel_clauses (ctx
->stmt
),
2602 OMP_CLAUSE_LASTPRIVATE
);
2608 gimple_seq_add_stmt (stmt_list
, gimple_build_label (label
));
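
/* For example, for

	#pragma omp parallel for lastprivate (x)
	for (i = 0; i < n; i++)
	  x = f (i);

   the code above, guarded by PREDICATE (a comparison of the control
   variable against its final value), amounts to

	if (<this thread ran the sequentially last iteration>)
	  x = x.priv;		<- x.priv is an illustrative name for the
				   thread's private copy

   so exactly one thread writes its private value back to the original
   variable.  */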
/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;
2639 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2641 tree var
, ref
, new_var
;
2642 enum tree_code code
;
2643 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2645 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_REDUCTION
)
2648 var
= OMP_CLAUSE_DECL (c
);
2649 new_var
= lookup_decl (var
, ctx
);
2650 if (is_reference (var
))
2651 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2652 ref
= build_outer_var_ref (var
, ctx
);
2653 code
= OMP_CLAUSE_REDUCTION_CODE (c
);
      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
2662 tree addr
= build_fold_addr_expr_loc (clause_loc
, ref
);
2664 addr
= save_expr (addr
);
2665 ref
= build1 (INDIRECT_REF
, TREE_TYPE (TREE_TYPE (addr
)), addr
);
2666 x
= fold_build2_loc (clause_loc
, code
, TREE_TYPE (ref
), ref
, new_var
);
2667 x
= build2 (OMP_ATOMIC
, void_type_node
, addr
, x
);
2668 gimplify_and_add (x
, stmt_seqp
);
2672 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
))
2674 tree placeholder
= OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
);
2676 if (is_reference (var
))
2677 ref
= build_fold_addr_expr_loc (clause_loc
, ref
);
2678 SET_DECL_VALUE_EXPR (placeholder
, ref
);
2679 DECL_HAS_VALUE_EXPR_P (placeholder
) = 1;
2680 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
), ctx
);
2681 gimple_seq_add_seq (&sub_seq
, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
));
2682 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c
) = NULL
;
2683 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c
) = NULL
;
2687 x
= build2 (code
, TREE_TYPE (ref
), ref
, new_var
);
2688 ref
= build_outer_var_ref (var
, ctx
);
2689 gimplify_assign (ref
, x
, &sub_seq
);
2693 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START
),
2695 gimple_seq_add_stmt (stmt_seqp
, stmt
);
2697 gimple_seq_add_seq (stmt_seqp
, sub_seq
);
2699 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END
),
2701 gimple_seq_add_stmt (stmt_seqp
, stmt
);
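
/* The merge code built above thus takes one of two shapes: with a single
   scalar reduction clause the update of the outer variable is emitted as
   an OMP_ATOMIC operation, roughly

	*&sum_orig = *&sum_orig + sum_priv;	<- performed atomically

   while with several reduction clauses (or a reduction placeholder) the
   partial results are folded back inside a
   GOMP_atomic_start () / GOMP_atomic_end () critical section.  */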
/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2719 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYPRIVATE
)
2722 var
= OMP_CLAUSE_DECL (c
);
2723 by_ref
= use_pointer_for_field (var
, NULL
);
2725 ref
= build_sender_ref (var
, ctx
);
2726 x
= new_var
= lookup_decl_in_outer_ctx (var
, ctx
);
2729 x
= build_fold_addr_expr_loc (clause_loc
, new_var
);
2730 x
= fold_convert_loc (clause_loc
, TREE_TYPE (ref
), x
);
2732 gimplify_assign (ref
, x
, slist
);
2734 ref
= build_receiver_ref (var
, false, ctx
);
2737 ref
= fold_convert_loc (clause_loc
,
2738 build_pointer_type (TREE_TYPE (new_var
)),
2740 ref
= build_fold_indirect_ref_loc (clause_loc
, ref
);
2742 if (is_reference (var
))
2744 ref
= fold_convert_loc (clause_loc
, TREE_TYPE (new_var
), ref
);
2745 ref
= build_simple_mem_ref_loc (clause_loc
, ref
);
2746 new_var
= build_simple_mem_ref_loc (clause_loc
, new_var
);
2748 x
= lang_hooks
.decls
.omp_clause_assign_op (c
, new_var
, ref
);
2749 gimplify_and_add (x
, rlist
);
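
/* For example, with

	#pragma omp single copyprivate (x)
	  x = compute ();

   the thread that executed the single block stores &x (or the value,
   depending on use_pointer_for_field) into the communication record via
   SLIST, and every thread then copies the broadcast value back into its
   own X via RLIST.  */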
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c;
2763 for (c
= clauses
; c
; c
= OMP_CLAUSE_CHAIN (c
))
2765 tree val
, ref
, x
, var
;
2766 bool by_ref
, do_in
= false, do_out
= false;
2767 location_t clause_loc
= OMP_CLAUSE_LOCATION (c
);
2769 switch (OMP_CLAUSE_CODE (c
))
2771 case OMP_CLAUSE_PRIVATE
:
2772 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c
))
2775 case OMP_CLAUSE_FIRSTPRIVATE
:
2776 case OMP_CLAUSE_COPYIN
:
2777 case OMP_CLAUSE_LASTPRIVATE
:
2778 case OMP_CLAUSE_REDUCTION
:
2784 val
= OMP_CLAUSE_DECL (c
);
2785 var
= lookup_decl_in_outer_ctx (val
, ctx
);
2787 if (OMP_CLAUSE_CODE (c
) != OMP_CLAUSE_COPYIN
2788 && is_global_var (var
))
2790 if (is_variable_sized (val
))
2792 by_ref
= use_pointer_for_field (val
, NULL
);
2794 switch (OMP_CLAUSE_CODE (c
))
2796 case OMP_CLAUSE_PRIVATE
:
2797 case OMP_CLAUSE_FIRSTPRIVATE
:
2798 case OMP_CLAUSE_COPYIN
:
2802 case OMP_CLAUSE_LASTPRIVATE
:
2803 if (by_ref
|| is_reference (val
))
2805 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c
))
2812 if (lang_hooks
.decls
.omp_private_outer_ref (val
))
2817 case OMP_CLAUSE_REDUCTION
:
2819 do_out
= !(by_ref
|| is_reference (val
));
2828 ref
= build_sender_ref (val
, ctx
);
2829 x
= by_ref
? build_fold_addr_expr_loc (clause_loc
, var
) : var
;
2830 gimplify_assign (ref
, x
, ilist
);
2831 if (is_task_ctx (ctx
))
2832 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref
, 1)) = NULL
;
2837 ref
= build_sender_ref (val
, ctx
);
2838 gimplify_assign (var
, ref
, olist
);
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2856 for (f
= TYPE_FIELDS (record_type
); f
; f
= DECL_CHAIN (f
))
2858 ovar
= DECL_ABSTRACT_ORIGIN (f
);
2859 nvar
= maybe_lookup_decl (ovar
, ctx
);
2860 if (!nvar
|| !DECL_HAS_VALUE_EXPR_P (nvar
))
2863 /* If CTX is a nested parallel directive. Find the immediately
2864 enclosing parallel or workshare construct that contains a
2865 mapping for OVAR. */
2866 var
= lookup_decl_in_outer_ctx (ovar
, ctx
);
2868 if (use_pointer_for_field (ovar
, ctx
))
2870 x
= build_sender_ref (ovar
, ctx
);
2871 var
= build_fold_addr_expr (var
);
2872 gimplify_assign (x
, var
, ilist
);
2876 x
= build_sender_ref (ovar
, ctx
);
2877 gimplify_assign (x
, var
, ilist
);
2879 if (!TREE_READONLY (var
)
2880 /* We don't need to receive a new reference to a result
2881 or parm decl. In fact we may not store to it as we will
2882 invalidate any pending RSO and generate wrong gimple
2884 && !((TREE_CODE (var
) == RESULT_DECL
2885 || TREE_CODE (var
) == PARM_DECL
)
2886 && DECL_BY_REFERENCE (var
)))
2888 x
= build_sender_ref (ovar
, ctx
);
2889 gimplify_assign (var
, x
, olist
);
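
/* In other words, right before the runtime call the parent fills in the
   communication record, e.g.

	.omp_data_o.1.i = &i;	<- field passed by reference
				   (use_pointer_for_field)
	.omp_data_o.1.j = j;	<- small scalar passed by value

   and for by-value fields that may have been modified the value is
   copied back out of the record once the region is over (the OLIST
   sequence).  */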
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gimple entry_stmt, VEC(tree,gc) *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  VEC(tree,gc) *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
2934 if (is_combined_parallel (region
))
2936 switch (region
->inner
->type
)
2938 case GIMPLE_OMP_FOR
:
2939 gcc_assert (region
->inner
->sched_kind
!= OMP_CLAUSE_SCHEDULE_AUTO
);
2940 start_ix2
= ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2941 + (region
->inner
->sched_kind
2942 == OMP_CLAUSE_SCHEDULE_RUNTIME
2943 ? 3 : region
->inner
->sched_kind
));
2944 start_ix
= (enum built_in_function
)start_ix2
;
2946 case GIMPLE_OMP_SECTIONS
:
2947 start_ix
= BUILT_IN_GOMP_PARALLEL_SECTIONS_START
;
2954 /* By default, the value of NUM_THREADS is zero (selected at run time)
2955 and there is no conditional. */
2957 val
= build_int_cst (unsigned_type_node
, 0);
2959 c
= find_omp_clause (clauses
, OMP_CLAUSE_IF
);
2961 cond
= OMP_CLAUSE_IF_EXPR (c
);
2963 c
= find_omp_clause (clauses
, OMP_CLAUSE_NUM_THREADS
);
2966 val
= OMP_CLAUSE_NUM_THREADS_EXPR (c
);
2967 clause_loc
= OMP_CLAUSE_LOCATION (c
);
2970 clause_loc
= gimple_location (entry_stmt
);
2972 /* Ensure 'val' is of the correct type. */
2973 val
= fold_convert_loc (clause_loc
, unsigned_type_node
, val
);
2975 /* If we found the clause 'if (cond)', build either
2976 (cond != 0) or (cond ? val : 1u). */
2979 gimple_stmt_iterator gsi
;
2981 cond
= gimple_boolify (cond
);
2983 if (integer_zerop (val
))
2984 val
= fold_build2_loc (clause_loc
,
2985 EQ_EXPR
, unsigned_type_node
, cond
,
2986 build_int_cst (TREE_TYPE (cond
), 0));
2989 basic_block cond_bb
, then_bb
, else_bb
;
2990 edge e
, e_then
, e_else
;
2991 tree tmp_then
, tmp_else
, tmp_join
, tmp_var
;
2993 tmp_var
= create_tmp_var (TREE_TYPE (val
), NULL
);
2994 if (gimple_in_ssa_p (cfun
))
2996 tmp_then
= make_ssa_name (tmp_var
, NULL
);
2997 tmp_else
= make_ssa_name (tmp_var
, NULL
);
2998 tmp_join
= make_ssa_name (tmp_var
, NULL
);
3007 e
= split_block (bb
, NULL
);
3012 then_bb
= create_empty_bb (cond_bb
);
3013 else_bb
= create_empty_bb (then_bb
);
3014 set_immediate_dominator (CDI_DOMINATORS
, then_bb
, cond_bb
);
3015 set_immediate_dominator (CDI_DOMINATORS
, else_bb
, cond_bb
);
3017 stmt
= gimple_build_cond_empty (cond
);
3018 gsi
= gsi_start_bb (cond_bb
);
3019 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3021 gsi
= gsi_start_bb (then_bb
);
3022 stmt
= gimple_build_assign (tmp_then
, val
);
3023 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3025 gsi
= gsi_start_bb (else_bb
);
3026 stmt
= gimple_build_assign
3027 (tmp_else
, build_int_cst (unsigned_type_node
, 1));
3028 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3030 make_edge (cond_bb
, then_bb
, EDGE_TRUE_VALUE
);
3031 make_edge (cond_bb
, else_bb
, EDGE_FALSE_VALUE
);
3032 e_then
= make_edge (then_bb
, bb
, EDGE_FALLTHRU
);
3033 e_else
= make_edge (else_bb
, bb
, EDGE_FALLTHRU
);
3035 if (gimple_in_ssa_p (cfun
))
3037 gimple phi
= create_phi_node (tmp_join
, bb
);
3038 SSA_NAME_DEF_STMT (tmp_join
) = phi
;
3039 add_phi_arg (phi
, tmp_then
, e_then
, UNKNOWN_LOCATION
);
3040 add_phi_arg (phi
, tmp_else
, e_else
, UNKNOWN_LOCATION
);
3046 gsi
= gsi_start_bb (bb
);
3047 val
= force_gimple_operand_gsi (&gsi
, val
, true, NULL_TREE
,
3048 false, GSI_CONTINUE_LINKING
);
3051 gsi
= gsi_last_bb (bb
);
3052 t
= gimple_omp_parallel_data_arg (entry_stmt
);
3054 t1
= null_pointer_node
;
3056 t1
= build_fold_addr_expr (t
);
3057 t2
= build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt
));
3059 args
= VEC_alloc (tree
, gc
, 3 + VEC_length (tree
, ws_args
));
3060 VEC_quick_push (tree
, args
, t2
);
3061 VEC_quick_push (tree
, args
, t1
);
3062 VEC_quick_push (tree
, args
, val
);
3063 VEC_splice (tree
, args
, ws_args
);
3065 t
= build_call_expr_loc_vec (UNKNOWN_LOCATION
,
3066 builtin_decl_explicit (start_ix
), args
);
3068 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3069 false, GSI_CONTINUE_LINKING
);
3071 t
= gimple_omp_parallel_data_arg (entry_stmt
);
3073 t
= null_pointer_node
;
3075 t
= build_fold_addr_expr (t
);
3076 t
= build_call_expr_loc (gimple_location (entry_stmt
),
3077 gimple_omp_parallel_child_fn (entry_stmt
), 1, t
);
3078 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3079 false, GSI_CONTINUE_LINKING
);
3081 t
= build_call_expr_loc (gimple_location (entry_stmt
),
3082 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END
),
3084 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3085 false, GSI_CONTINUE_LINKING
);
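
/* The net effect in the parent function is a sequence along the lines of

	.omp_data_o.1.x = ...;		<- stores emitted by the send clauses
	__builtin_GOMP_parallel_start (child_fn, &.omp_data_o.1, num_threads);
	child_fn (&.omp_data_o.1);
	__builtin_GOMP_parallel_end ();

   with GOMP_parallel_start replaced by one of the combined
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start entry points
   (and WS_ARGS appended to the argument list) when the parallel is
   combined with a workshare construct.  ".omp_data_o.1" and "child_fn"
   stand for the data record and child function created earlier.  */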
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, c2, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;
3107 c
= find_omp_clause (clauses
, OMP_CLAUSE_UNTIED
);
3108 c2
= find_omp_clause (clauses
, OMP_CLAUSE_MERGEABLE
);
3109 flags
= build_int_cst (unsigned_type_node
,
3110 (c
? 1 : 0) + (c2
? 4 : 0));
3112 c
= find_omp_clause (clauses
, OMP_CLAUSE_FINAL
);
3115 c
= gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c
));
3116 c
= fold_build3_loc (loc
, COND_EXPR
, unsigned_type_node
, c
,
3117 build_int_cst (unsigned_type_node
, 2),
3118 build_int_cst (unsigned_type_node
, 0));
3119 flags
= fold_build2_loc (loc
, PLUS_EXPR
, unsigned_type_node
, flags
, c
);
3122 gsi
= gsi_last_bb (bb
);
3123 t
= gimple_omp_task_data_arg (entry_stmt
);
3125 t2
= null_pointer_node
;
3127 t2
= build_fold_addr_expr_loc (loc
, t
);
3128 t1
= build_fold_addr_expr_loc (loc
, gimple_omp_task_child_fn (entry_stmt
));
3129 t
= gimple_omp_task_copy_fn (entry_stmt
);
3131 t3
= null_pointer_node
;
3133 t3
= build_fold_addr_expr_loc (loc
, t
);
3135 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK
),
3137 gimple_omp_task_arg_size (entry_stmt
),
3138 gimple_omp_task_arg_align (entry_stmt
), cond
, flags
);
3140 force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3141 false, GSI_CONTINUE_LINKING
);
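
/* The generated call has the shape

	__builtin_GOMP_task (child_fn, &.omp_data_o, copy_fn,
			     arg_size, arg_align, if_cond, flags);

   where FLAGS packs the clause bits computed above: bit 0 for untied,
   bit 1 for final and bit 2 for mergeable.  A missing data argument or
   copy function is passed as a null pointer.  */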
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (VEC(tree,gc) *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;
3222 FOR_EACH_EDGE (e
, ei
, exit_bb
->preds
)
3224 gsi
= gsi_last_bb (e
->src
);
3225 if (gsi_end_p (gsi
))
3227 stmt
= gsi_stmt (gsi
);
3228 if (gimple_code (stmt
) == GIMPLE_OMP_RETURN
3229 && !gimple_omp_return_nowait_p (stmt
))
3231 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3232 in many cases. If there could be tasks queued, the barrier
3233 might be needed to let the tasks run before some local
3234 variable of the parallel that the task uses as shared
3235 runs out of scope. The task can be spawned either
3236 from within current function (this would be easy to check)
3237 or from some function it calls and gets passed an address
3238 of such a variable. */
3239 if (any_addressable_vars
< 0)
3241 gimple parallel_stmt
= last_stmt (region
->entry
);
3242 tree child_fun
= gimple_omp_parallel_child_fn (parallel_stmt
);
3243 tree local_decls
, block
, decl
;
3246 any_addressable_vars
= 0;
3247 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun
), ix
, decl
)
3248 if (TREE_ADDRESSABLE (decl
))
3250 any_addressable_vars
= 1;
3253 for (block
= gimple_block (stmt
);
3254 !any_addressable_vars
3256 && TREE_CODE (block
) == BLOCK
;
3257 block
= BLOCK_SUPERCONTEXT (block
))
3259 for (local_decls
= BLOCK_VARS (block
);
3261 local_decls
= DECL_CHAIN (local_decls
))
3262 if (TREE_ADDRESSABLE (local_decls
))
3264 any_addressable_vars
= 1;
3267 if (block
== gimple_block (parallel_stmt
))
3271 if (!any_addressable_vars
)
3272 gimple_omp_return_set_nowait (stmt
);
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
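
/* For instance, in

	#pragma omp parallel
	{
	  #pragma omp for
	  for (i = 0; i < n; i++)
	    body (i);
	}

   the implicit barrier at the end of the worksharing loop is immediately
   followed by the implicit barrier at the end of the parallel region, so
   (absent queued tasks touching addressable locals, see above) the inner
   one can be dropped by marking its GIMPLE_OMP_RETURN as nowait.  */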
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3319 gimple call
= gsi_stmt (gsi
);
3322 if (is_gimple_call (call
)
3323 && (decl
= gimple_call_fndecl (call
))
3324 && DECL_EXTERNAL (decl
)
3325 && TREE_PUBLIC (decl
)
3326 && DECL_INITIAL (decl
) == NULL
)
3330 if (DECL_NAME (decl
) == thr_num_id
)
3332 /* In #pragma omp task untied omp_get_thread_num () can change
3333 during the execution of the task region. */
3336 built_in
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
3338 else if (DECL_NAME (decl
) == num_thr_id
)
3339 built_in
= builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
);
3343 if (DECL_ASSEMBLER_NAME (decl
) != DECL_ASSEMBLER_NAME (built_in
)
3344 || gimple_call_num_args (call
) != 0)
3347 if (flag_exceptions
&& !TREE_NOTHROW (decl
))
3350 if (TREE_CODE (TREE_TYPE (decl
)) != FUNCTION_TYPE
3351 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl
)),
3352 TREE_TYPE (TREE_TYPE (built_in
))))
3355 gimple_call_set_fndecl (call
, built_in
);
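
/* So inside an outlined parallel body a sequence such as

	tid1 = omp_get_thread_num ();
	...
	tid2 = omp_get_thread_num ();

   becomes two calls to __builtin_omp_get_thread_num ().  Since that
   builtin is declared const, later passes are free to combine them into
   a single value; untied tasks are excluded because the executing thread
   may change at task scheduling points.  */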
/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  tree save_current;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  VEC(tree,gc) *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has been already instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;
3389 if (child_cfun
->cfg
)
3391 /* Due to inlining, it may happen that we have already outlined
3392 the region, in which case all we need to do is make the
3393 sub-graph unreachable and emit the parallel call. */
3394 edge entry_succ_e
, exit_succ_e
;
3395 gimple_stmt_iterator gsi
;
3397 entry_succ_e
= single_succ_edge (entry_bb
);
3399 gsi
= gsi_last_bb (entry_bb
);
3400 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_PARALLEL
3401 || gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_TASK
);
3402 gsi_remove (&gsi
, true);
3407 exit_succ_e
= single_succ_edge (exit_bb
);
3408 make_edge (new_bb
, exit_succ_e
->dest
, EDGE_FALLTHRU
);
3410 remove_edge_and_dominated_blocks (entry_succ_e
);
3414 unsigned srcidx
, dstidx
, num
;
3416 /* If the parallel region needs data sent from the parent
3417 function, then the very first statement (except possible
3418 tree profile counter updates) of the parallel body
3419 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3420 &.OMP_DATA_O is passed as an argument to the child function,
3421 we need to replace it with the argument as seen by the child
3424 In most cases, this will end up being the identity assignment
3425 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3426 a function call that has been inlined, the original PARM_DECL
3427 .OMP_DATA_I may have been converted into a different local
3428 variable. In which case, we need to keep the assignment. */
3429 if (gimple_omp_taskreg_data_arg (entry_stmt
))
3431 basic_block entry_succ_bb
= single_succ (entry_bb
);
3432 gimple_stmt_iterator gsi
;
3434 gimple parcopy_stmt
= NULL
;
3436 for (gsi
= gsi_start_bb (entry_succ_bb
); ; gsi_next (&gsi
))
3440 gcc_assert (!gsi_end_p (gsi
));
3441 stmt
= gsi_stmt (gsi
);
3442 if (gimple_code (stmt
) != GIMPLE_ASSIGN
)
3445 if (gimple_num_ops (stmt
) == 2)
3447 tree arg
= gimple_assign_rhs1 (stmt
);
3449 /* We're ignore the subcode because we're
3450 effectively doing a STRIP_NOPS. */
3452 if (TREE_CODE (arg
) == ADDR_EXPR
3453 && TREE_OPERAND (arg
, 0)
3454 == gimple_omp_taskreg_data_arg (entry_stmt
))
3456 parcopy_stmt
= stmt
;
3462 gcc_assert (parcopy_stmt
!= NULL
);
3463 arg
= DECL_ARGUMENTS (child_fn
);
3465 if (!gimple_in_ssa_p (cfun
))
3467 if (gimple_assign_lhs (parcopy_stmt
) == arg
)
3468 gsi_remove (&gsi
, true);
3471 /* ?? Is setting the subcode really necessary ?? */
3472 gimple_omp_set_subcode (parcopy_stmt
, TREE_CODE (arg
));
3473 gimple_assign_set_rhs1 (parcopy_stmt
, arg
);
3478 /* If we are in ssa form, we must load the value from the default
3479 definition of the argument. That should not be defined now,
3480 since the argument is not used uninitialized. */
3481 gcc_assert (gimple_default_def (cfun
, arg
) == NULL
);
3482 narg
= make_ssa_name (arg
, gimple_build_nop ());
3483 set_default_def (arg
, narg
);
3484 /* ?? Is setting the subcode really necessary ?? */
3485 gimple_omp_set_subcode (parcopy_stmt
, TREE_CODE (narg
));
3486 gimple_assign_set_rhs1 (parcopy_stmt
, narg
);
3487 update_stmt (parcopy_stmt
);
3491 /* Declare local variables needed in CHILD_CFUN. */
3492 block
= DECL_INITIAL (child_fn
);
3493 BLOCK_VARS (block
) = vec2chain (child_cfun
->local_decls
);
3494 /* The gimplifier could record temporaries in parallel/task block
3495 rather than in containing function's local_decls chain,
3496 which would mean cgraph missed finalizing them. Do it now. */
3497 for (t
= BLOCK_VARS (block
); t
; t
= DECL_CHAIN (t
))
3498 if (TREE_CODE (t
) == VAR_DECL
3500 && !DECL_EXTERNAL (t
))
3501 varpool_finalize_decl (t
);
3502 DECL_SAVED_TREE (child_fn
) = NULL
;
3503 gimple_set_body (child_fn
, bb_seq (single_succ (entry_bb
)));
3504 TREE_USED (block
) = 1;
3506 /* Reset DECL_CONTEXT on function arguments. */
3507 for (t
= DECL_ARGUMENTS (child_fn
); t
; t
= DECL_CHAIN (t
))
3508 DECL_CONTEXT (t
) = child_fn
;
3510 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3511 so that it can be moved to the child function. */
3512 gsi
= gsi_last_bb (entry_bb
);
3513 stmt
= gsi_stmt (gsi
);
3514 gcc_assert (stmt
&& (gimple_code (stmt
) == GIMPLE_OMP_PARALLEL
3515 || gimple_code (stmt
) == GIMPLE_OMP_TASK
));
3516 gsi_remove (&gsi
, true);
3517 e
= split_block (entry_bb
, stmt
);
3519 single_succ_edge (entry_bb
)->flags
= EDGE_FALLTHRU
;
3521 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3524 gsi
= gsi_last_bb (exit_bb
);
3525 gcc_assert (!gsi_end_p (gsi
)
3526 && gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_RETURN
);
3527 stmt
= gimple_build_return (NULL
);
3528 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
3529 gsi_remove (&gsi
, true);
3532 /* Move the parallel region into CHILD_CFUN. */
3534 if (gimple_in_ssa_p (cfun
))
3536 push_cfun (child_cfun
);
3537 init_tree_ssa (child_cfun
);
3538 init_ssa_operands ();
3539 cfun
->gimple_df
->in_ssa_p
= true;
3544 block
= gimple_block (entry_stmt
);
3546 new_bb
= move_sese_region_to_fn (child_cfun
, entry_bb
, exit_bb
, block
);
3548 single_succ_edge (new_bb
)->flags
= EDGE_FALLTHRU
;
3550 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3551 num
= VEC_length (tree
, child_cfun
->local_decls
);
3552 for (srcidx
= 0, dstidx
= 0; srcidx
< num
; srcidx
++)
3554 t
= VEC_index (tree
, child_cfun
->local_decls
, srcidx
);
3555 if (DECL_CONTEXT (t
) == cfun
->decl
)
3557 if (srcidx
!= dstidx
)
3558 VEC_replace (tree
, child_cfun
->local_decls
, dstidx
, t
);
3562 VEC_truncate (tree
, child_cfun
->local_decls
, dstidx
);
3564 /* Inform the callgraph about the new function. */
3565 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
3566 = cfun
->curr_properties
;
3567 cgraph_add_new_function (child_fn
, true);
3569 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3570 fixed in a following pass. */
3571 push_cfun (child_cfun
);
3572 save_current
= current_function_decl
;
3573 current_function_decl
= child_fn
;
3575 optimize_omp_library_calls (entry_stmt
);
3576 rebuild_cgraph_edges ();
3578 /* Some EH regions might become dead, see PR34608. If
3579 pass_cleanup_cfg isn't the first pass to happen with the
3580 new child, these dead EH edges might cause problems.
3581 Clean them up now. */
3582 if (flag_exceptions
)
3585 bool changed
= false;
3588 changed
|= gimple_purge_dead_eh_edges (bb
);
3590 cleanup_tree_cfg ();
3592 if (gimple_in_ssa_p (cfun
))
3593 update_ssa (TODO_update_ssa
);
3594 current_function_decl
= save_current
;
3598 /* Emit a library call to launch the children threads. */
3599 if (gimple_code (entry_stmt
) == GIMPLE_OMP_PARALLEL
)
3600 expand_parallel_call (region
, new_bb
, entry_stmt
, ws_args
);
3602 expand_task_call (new_bb
, entry_stmt
);
3603 update_ssa (TODO_update_ssa_only_virtuals
);
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.

    For collapsed loops, given parameters:

      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	count3 = (adj + N32 - N31) / STEP3;
	count2 = (adj + N22 - N21) / STEP2;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:
 */
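
/* As a concrete instance, for

	#pragma omp for schedule (dynamic, 4)
	for (i = 0; i < n; i++)
	  body (i);

   the FOO above is "dynamic": GOMP_loop_dynamic_start hands the calling
   thread its first [istart0, iend0) chunk of (up to) 4 iterations and
   GOMP_loop_dynamic_next keeps handing out further chunks until the
   iteration space is exhausted.  */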
3688 expand_omp_for_generic (struct omp_region
*region
,
3689 struct omp_for_data
*fd
,
3690 enum built_in_function start_fn
,
3691 enum built_in_function next_fn
)
3693 tree type
, istart0
, iend0
, iend
;
3694 tree t
, vmain
, vback
, bias
= NULL_TREE
;
3695 basic_block entry_bb
, cont_bb
, exit_bb
, l0_bb
, l1_bb
, collapse_bb
;
3696 basic_block l2_bb
= NULL
, l3_bb
= NULL
;
3697 gimple_stmt_iterator gsi
;
3699 bool in_combined_parallel
= is_combined_parallel (region
);
3700 bool broken_loop
= region
->cont
== NULL
;
3702 tree
*counts
= NULL
;
3705 gcc_assert (!broken_loop
|| !in_combined_parallel
);
3706 gcc_assert (fd
->iter_type
== long_integer_type_node
3707 || !in_combined_parallel
);
3709 type
= TREE_TYPE (fd
->loop
.v
);
3710 istart0
= create_tmp_var (fd
->iter_type
, ".istart0");
3711 iend0
= create_tmp_var (fd
->iter_type
, ".iend0");
3712 TREE_ADDRESSABLE (istart0
) = 1;
3713 TREE_ADDRESSABLE (iend0
) = 1;
3714 if (gimple_in_ssa_p (cfun
))
3716 add_referenced_var (istart0
);
3717 add_referenced_var (iend0
);
3720 /* See if we need to bias by LLONG_MIN. */
3721 if (fd
->iter_type
== long_long_unsigned_type_node
3722 && TREE_CODE (type
) == INTEGER_TYPE
3723 && !TYPE_UNSIGNED (type
))
3727 if (fd
->loop
.cond_code
== LT_EXPR
)
3730 n2
= fold_build2 (PLUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
3734 n1
= fold_build2 (MINUS_EXPR
, type
, fd
->loop
.n2
, fd
->loop
.step
);
3737 if (TREE_CODE (n1
) != INTEGER_CST
3738 || TREE_CODE (n2
) != INTEGER_CST
3739 || ((tree_int_cst_sgn (n1
) < 0) ^ (tree_int_cst_sgn (n2
) < 0)))
3740 bias
= fold_convert (fd
->iter_type
, TYPE_MIN_VALUE (type
));
3743 entry_bb
= region
->entry
;
3744 cont_bb
= region
->cont
;
3746 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
3747 gcc_assert (broken_loop
3748 || BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
3749 l0_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
3750 l1_bb
= single_succ (l0_bb
);
3753 l2_bb
= create_empty_bb (cont_bb
);
3754 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== l1_bb
);
3755 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
3759 l3_bb
= BRANCH_EDGE (entry_bb
)->dest
;
3760 exit_bb
= region
->exit
;
3762 gsi
= gsi_last_bb (entry_bb
);
3764 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
3765 if (fd
->collapse
> 1)
3767 /* collapsed loops need work for expansion in SSA form. */
3768 gcc_assert (!gimple_in_ssa_p (cfun
));
3769 counts
= (tree
*) alloca (fd
->collapse
* sizeof (tree
));
3770 for (i
= 0; i
< fd
->collapse
; i
++)
3772 tree itype
= TREE_TYPE (fd
->loops
[i
].v
);
3774 if (POINTER_TYPE_P (itype
))
3775 itype
= lang_hooks
.types
.type_for_size (TYPE_PRECISION (itype
), 0);
3776 t
= build_int_cst (itype
, (fd
->loops
[i
].cond_code
== LT_EXPR
3778 t
= fold_build2 (PLUS_EXPR
, itype
,
3779 fold_convert (itype
, fd
->loops
[i
].step
), t
);
3780 t
= fold_build2 (PLUS_EXPR
, itype
, t
,
3781 fold_convert (itype
, fd
->loops
[i
].n2
));
3782 t
= fold_build2 (MINUS_EXPR
, itype
, t
,
3783 fold_convert (itype
, fd
->loops
[i
].n1
));
3784 if (TYPE_UNSIGNED (itype
) && fd
->loops
[i
].cond_code
== GT_EXPR
)
3785 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
3786 fold_build1 (NEGATE_EXPR
, itype
, t
),
3787 fold_build1 (NEGATE_EXPR
, itype
,
3788 fold_convert (itype
,
3789 fd
->loops
[i
].step
)));
3791 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
,
3792 fold_convert (itype
, fd
->loops
[i
].step
));
3793 t
= fold_convert (type
, t
);
3794 if (TREE_CODE (t
) == INTEGER_CST
)
3798 counts
[i
] = create_tmp_var (type
, ".count");
3799 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3800 true, GSI_SAME_STMT
);
3801 stmt
= gimple_build_assign (counts
[i
], t
);
3802 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
3804 if (SSA_VAR_P (fd
->loop
.n2
))
3810 t
= fold_build2 (MULT_EXPR
, type
, fd
->loop
.n2
, counts
[i
]);
3811 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3812 true, GSI_SAME_STMT
);
3814 stmt
= gimple_build_assign (fd
->loop
.n2
, t
);
3815 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
3819 if (in_combined_parallel
)
3821 /* In a combined parallel loop, emit a call to
3822 GOMP_loop_foo_next. */
3823 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
3824 build_fold_addr_expr (istart0
),
3825 build_fold_addr_expr (iend0
));
3829 tree t0
, t1
, t2
, t3
, t4
;
3830 /* If this is not a combined parallel loop, emit a call to
3831 GOMP_loop_foo_start in ENTRY_BB. */
3832 t4
= build_fold_addr_expr (iend0
);
3833 t3
= build_fold_addr_expr (istart0
);
3834 t2
= fold_convert (fd
->iter_type
, fd
->loop
.step
);
3835 if (POINTER_TYPE_P (type
)
3836 && TYPE_PRECISION (type
) != TYPE_PRECISION (fd
->iter_type
))
3838 /* Avoid casting pointers to integer of a different size. */
3840 = lang_hooks
.types
.type_for_size (TYPE_PRECISION (type
), 0);
3841 t1
= fold_convert (fd
->iter_type
, fold_convert (itype
, fd
->loop
.n2
));
3842 t0
= fold_convert (fd
->iter_type
, fold_convert (itype
, fd
->loop
.n1
));
3846 t1
= fold_convert (fd
->iter_type
, fd
->loop
.n2
);
3847 t0
= fold_convert (fd
->iter_type
, fd
->loop
.n1
);
3851 t1
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t1
, bias
);
3852 t0
= fold_build2 (PLUS_EXPR
, fd
->iter_type
, t0
, bias
);
3854 if (fd
->iter_type
== long_integer_type_node
)
3858 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
3859 t
= build_call_expr (builtin_decl_explicit (start_fn
),
3860 6, t0
, t1
, t2
, t
, t3
, t4
);
3863 t
= build_call_expr (builtin_decl_explicit (start_fn
),
3864 5, t0
, t1
, t2
, t3
, t4
);
3872 /* The GOMP_loop_ull_*start functions have additional boolean
3873 argument, true for < loops and false for > loops.
3874 In Fortran, the C bool type can be different from
3875 boolean_type_node. */
3876 bfn_decl
= builtin_decl_explicit (start_fn
);
3877 c_bool_type
= TREE_TYPE (TREE_TYPE (bfn_decl
));
3878 t5
= build_int_cst (c_bool_type
,
3879 fd
->loop
.cond_code
== LT_EXPR
? 1 : 0);
3882 tree bfn_decl
= builtin_decl_explicit (start_fn
);
3883 t
= fold_convert (fd
->iter_type
, fd
->chunk_size
);
3884 t
= build_call_expr (bfn_decl
, 7, t5
, t0
, t1
, t2
, t
, t3
, t4
);
3887 t
= build_call_expr (builtin_decl_explicit (start_fn
),
3888 6, t5
, t0
, t1
, t2
, t3
, t4
);
3891 if (TREE_TYPE (t
) != boolean_type_node
)
3892 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
3893 t
, build_int_cst (TREE_TYPE (t
), 0));
3894 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3895 true, GSI_SAME_STMT
);
3896 gsi_insert_after (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
3898 /* Remove the GIMPLE_OMP_FOR statement. */
3899 gsi_remove (&gsi
, true);
3901 /* Iteration setup for sequential loop goes in L0_BB. */
3902 gsi
= gsi_start_bb (l0_bb
);
3905 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
3906 if (POINTER_TYPE_P (type
))
3907 t
= fold_convert (lang_hooks
.types
.type_for_size (TYPE_PRECISION (type
),
3909 t
= fold_convert (type
, t
);
3910 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3911 false, GSI_CONTINUE_LINKING
);
3912 stmt
= gimple_build_assign (fd
->loop
.v
, t
);
3913 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3917 t
= fold_build2 (MINUS_EXPR
, fd
->iter_type
, t
, bias
);
3918 if (POINTER_TYPE_P (type
))
3919 t
= fold_convert (lang_hooks
.types
.type_for_size (TYPE_PRECISION (type
),
3921 t
= fold_convert (type
, t
);
3922 iend
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
3923 false, GSI_CONTINUE_LINKING
);
3924 if (fd
->collapse
> 1)
3926 tree tem
= create_tmp_var (type
, ".tem");
3928 stmt
= gimple_build_assign (tem
, fd
->loop
.v
);
3929 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3930 for (i
= fd
->collapse
- 1; i
>= 0; i
--)
3932 tree vtype
= TREE_TYPE (fd
->loops
[i
].v
), itype
;
3934 if (POINTER_TYPE_P (vtype
))
3935 itype
= lang_hooks
.types
.type_for_size (TYPE_PRECISION (vtype
), 0);
3936 t
= fold_build2 (TRUNC_MOD_EXPR
, type
, tem
, counts
[i
]);
3937 t
= fold_convert (itype
, t
);
3938 t
= fold_build2 (MULT_EXPR
, itype
, t
,
3939 fold_convert (itype
, fd
->loops
[i
].step
));
3940 if (POINTER_TYPE_P (vtype
))
3941 t
= fold_build_pointer_plus (fd
->loops
[i
].n1
, t
);
3943 t
= fold_build2 (PLUS_EXPR
, itype
, fd
->loops
[i
].n1
, t
);
3944 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3945 false, GSI_CONTINUE_LINKING
);
3946 stmt
= gimple_build_assign (fd
->loops
[i
].v
, t
);
3947 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3950 t
= fold_build2 (TRUNC_DIV_EXPR
, type
, tem
, counts
[i
]);
3951 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3952 false, GSI_CONTINUE_LINKING
);
3953 stmt
= gimple_build_assign (tem
, t
);
3954 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
3961 /* Code to control the increment and predicate for the sequential
3962 loop goes in the CONT_BB. */
3963 gsi
= gsi_last_bb (cont_bb
);
3964 stmt
= gsi_stmt (gsi
);
3965 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
3966 vmain
= gimple_omp_continue_control_use (stmt
);
3967 vback
= gimple_omp_continue_control_def (stmt
);
3969 if (POINTER_TYPE_P (type
))
3970 t
= fold_build_pointer_plus (vmain
, fd
->loop
.step
);
3972 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->loop
.step
);
3973 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
3974 true, GSI_SAME_STMT
);
3975 stmt
= gimple_build_assign (vback
, t
);
3976 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
3978 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
, vback
, iend
);
3979 stmt
= gimple_build_cond_empty (t
);
3980 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
3982 /* Remove GIMPLE_OMP_CONTINUE. */
3983 gsi_remove (&gsi
, true);
3985 if (fd
->collapse
> 1)
3987 basic_block last_bb
, bb
;
3990 for (i
= fd
->collapse
- 1; i
>= 0; i
--)
3992 tree vtype
= TREE_TYPE (fd
->loops
[i
].v
);
3994 bb
= create_empty_bb (last_bb
);
3995 gsi
= gsi_start_bb (bb
);
3997 if (i
< fd
->collapse
- 1)
3999 e
= make_edge (last_bb
, bb
, EDGE_FALSE_VALUE
);
4000 e
->probability
= REG_BR_PROB_BASE
/ 8;
4002 t
= fd
->loops
[i
+ 1].n1
;
4003 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
4004 false, GSI_CONTINUE_LINKING
);
4005 stmt
= gimple_build_assign (fd
->loops
[i
+ 1].v
, t
);
4006 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4011 set_immediate_dominator (CDI_DOMINATORS
, bb
, last_bb
);
4013 if (POINTER_TYPE_P (vtype
))
4014 t
= fold_build_pointer_plus (fd
->loops
[i
].v
, fd
->loops
[i
].step
);
4016 t
= fold_build2 (PLUS_EXPR
, vtype
, fd
->loops
[i
].v
,
4018 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
4019 false, GSI_CONTINUE_LINKING
);
4020 stmt
= gimple_build_assign (fd
->loops
[i
].v
, t
);
4021 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4025 t
= fd
->loops
[i
].n2
;
4026 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4027 false, GSI_CONTINUE_LINKING
);
4028 t
= fold_build2 (fd
->loops
[i
].cond_code
, boolean_type_node
,
4030 stmt
= gimple_build_cond_empty (t
);
4031 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4032 e
= make_edge (bb
, l1_bb
, EDGE_TRUE_VALUE
);
4033 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
4036 make_edge (bb
, l1_bb
, EDGE_FALLTHRU
);
4041 /* Emit code to get the next parallel iteration in L2_BB. */
4042 gsi
= gsi_start_bb (l2_bb
);
4044 t
= build_call_expr (builtin_decl_explicit (next_fn
), 2,
4045 build_fold_addr_expr (istart0
),
4046 build_fold_addr_expr (iend0
));
4047 t
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4048 false, GSI_CONTINUE_LINKING
);
4049 if (TREE_TYPE (t
) != boolean_type_node
)
4050 t
= fold_build2 (NE_EXPR
, boolean_type_node
,
4051 t
, build_int_cst (TREE_TYPE (t
), 0));
4052 stmt
= gimple_build_cond_empty (t
);
4053 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4056 /* Add the loop cleanup function. */
4057 gsi
= gsi_last_bb (exit_bb
);
4058 if (gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
4059 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT
);
4061 t
= builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END
);
4062 stmt
= gimple_build_call (t
, 0);
4063 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
4064 gsi_remove (&gsi
, true);
4066 /* Connect the new blocks. */
4067 find_edge (entry_bb
, l0_bb
)->flags
= EDGE_TRUE_VALUE
;
4068 find_edge (entry_bb
, l3_bb
)->flags
= EDGE_FALSE_VALUE
;
4074 e
= find_edge (cont_bb
, l3_bb
);
4075 ne
= make_edge (l2_bb
, l3_bb
, EDGE_FALSE_VALUE
);
4077 phis
= phi_nodes (l3_bb
);
4078 for (gsi
= gsi_start (phis
); !gsi_end_p (gsi
); gsi_next (&gsi
))
4080 gimple phi
= gsi_stmt (gsi
);
4081 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, ne
),
4082 PHI_ARG_DEF_FROM_EDGE (phi
, e
));
4086 make_edge (cont_bb
, l2_bb
, EDGE_FALSE_VALUE
);
4087 if (fd
->collapse
> 1)
4089 e
= find_edge (cont_bb
, l1_bb
);
4091 e
= make_edge (cont_bb
, collapse_bb
, EDGE_TRUE_VALUE
);
4095 e
= find_edge (cont_bb
, l1_bb
);
4096 e
->flags
= EDGE_TRUE_VALUE
;
4098 e
->probability
= REG_BR_PROB_BASE
* 7 / 8;
4099 find_edge (cont_bb
, l2_bb
)->probability
= REG_BR_PROB_BASE
/ 8;
4100 make_edge (l2_bb
, l0_bb
, EDGE_TRUE_VALUE
);
4102 set_immediate_dominator (CDI_DOMINATORS
, l2_bb
,
4103 recompute_dominator (CDI_DOMINATORS
, l2_bb
));
4104 set_immediate_dominator (CDI_DOMINATORS
, l3_bb
,
4105 recompute_dominator (CDI_DOMINATORS
, l3_bb
));
4106 set_immediate_dominator (CDI_DOMINATORS
, l0_bb
,
4107 recompute_dominator (CDI_DOMINATORS
, l0_bb
));
4108 set_immediate_dominator (CDI_DOMINATORS
, l1_bb
,
4109 recompute_dominator (CDI_DOMINATORS
, l1_bb
));
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	tt = n % nthreads;
	if (threadid < tt) goto L3; else goto L4;
    L3:
	tt = 0;
	q = q + 1;
    L4:
	s0 = q * threadid + tt;
	e0 = s0 + q;
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
 */
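
/* For instance, with n = 10 iterations and nthreads = 4 the pseudocode
   above computes q = 2 and tt = 2; threads 0 and 1 take the q+1-sized
   slices [0,3) and [3,6), while threads 2 and 3 take the q-sized slices
   [6,8) and [8,10).  */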
static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;
4163 itype
= type
= TREE_TYPE (fd
->loop
.v
);
4164 if (POINTER_TYPE_P (type
))
4165 itype
= lang_hooks
.types
.type_for_size (TYPE_PRECISION (type
), 0);
4167 entry_bb
= region
->entry
;
4168 cont_bb
= region
->cont
;
4169 gcc_assert (EDGE_COUNT (entry_bb
->succs
) == 2);
4170 gcc_assert (BRANCH_EDGE (entry_bb
)->dest
== FALLTHRU_EDGE (cont_bb
)->dest
);
4171 seq_start_bb
= split_edge (FALLTHRU_EDGE (entry_bb
));
4172 body_bb
= single_succ (seq_start_bb
);
4173 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
4174 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
4175 fin_bb
= FALLTHRU_EDGE (cont_bb
)->dest
;
4176 exit_bb
= region
->exit
;
4178 /* Iteration space partitioning goes in ENTRY_BB. */
4179 gsi
= gsi_last_bb (entry_bb
);
4180 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
4182 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
), 0);
4183 t
= fold_convert (itype
, t
);
4184 nthreads
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4185 true, GSI_SAME_STMT
);
4187 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
), 0);
4188 t
= fold_convert (itype
, t
);
4189 threadid
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4190 true, GSI_SAME_STMT
);
4193 = force_gimple_operand_gsi (&gsi
, fold_convert (type
, fd
->loop
.n1
),
4194 true, NULL_TREE
, true, GSI_SAME_STMT
);
4196 = force_gimple_operand_gsi (&gsi
, fold_convert (itype
, fd
->loop
.n2
),
4197 true, NULL_TREE
, true, GSI_SAME_STMT
);
4199 = force_gimple_operand_gsi (&gsi
, fold_convert (itype
, fd
->loop
.step
),
4200 true, NULL_TREE
, true, GSI_SAME_STMT
);
4202 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
4203 t
= fold_build2 (PLUS_EXPR
, itype
, fd
->loop
.step
, t
);
4204 t
= fold_build2 (PLUS_EXPR
, itype
, t
, fd
->loop
.n2
);
4205 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, fd
->loop
.n1
));
4206 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
4207 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
4208 fold_build1 (NEGATE_EXPR
, itype
, t
),
4209 fold_build1 (NEGATE_EXPR
, itype
, fd
->loop
.step
));
4211 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, fd
->loop
.step
);
4212 t
= fold_convert (itype
, t
);
4213 n
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
4215 q
= create_tmp_var (itype
, "q");
4216 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, n
, nthreads
);
4217 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, true, GSI_SAME_STMT
);
4218 gsi_insert_before (&gsi
, gimple_build_assign (q
, t
), GSI_SAME_STMT
);
4220 tt
= create_tmp_var (itype
, "tt");
4221 t
= fold_build2 (TRUNC_MOD_EXPR
, itype
, n
, nthreads
);
4222 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
, true, GSI_SAME_STMT
);
4223 gsi_insert_before (&gsi
, gimple_build_assign (tt
, t
), GSI_SAME_STMT
);
4225 t
= build2 (LT_EXPR
, boolean_type_node
, threadid
, tt
);
4226 stmt
= gimple_build_cond_empty (t
);
4227 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
4229 second_bb
= split_block (entry_bb
, stmt
)->dest
;
4230 gsi
= gsi_last_bb (second_bb
);
4231 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
4233 gsi_insert_before (&gsi
, gimple_build_assign (tt
, build_int_cst (itype
, 0)),
4235 stmt
= gimple_build_assign_with_ops (PLUS_EXPR
, q
, q
,
4236 build_int_cst (itype
, 1));
4237 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
4239 third_bb
= split_block (second_bb
, stmt
)->dest
;
4240 gsi
= gsi_last_bb (third_bb
);
4241 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_FOR
);
4243 t
= build2 (MULT_EXPR
, itype
, q
, threadid
);
4244 t
= build2 (PLUS_EXPR
, itype
, t
, tt
);
4245 s0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
4247 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, q
);
4248 e0
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
, true, GSI_SAME_STMT
);
4250 t
= build2 (GE_EXPR
, boolean_type_node
, s0
, e0
);
4251 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
4253 /* Remove the GIMPLE_OMP_FOR statement. */
4254 gsi_remove (&gsi
, true);
4256 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4257 gsi
= gsi_start_bb (seq_start_bb
);
4259 t
= fold_convert (itype
, s0
);
4260 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
4261 if (POINTER_TYPE_P (type
))
4262 t
= fold_build_pointer_plus (fd
->loop
.n1
, t
);
4264 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
4265 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
4266 false, GSI_CONTINUE_LINKING
);
4267 stmt
= gimple_build_assign (fd
->loop
.v
, t
);
4268 gsi_insert_after (&gsi
, stmt
, GSI_CONTINUE_LINKING
);
4270 t
= fold_convert (itype
, e0
);
4271 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
4272 if (POINTER_TYPE_P (type
))
4273 t
= fold_build_pointer_plus (fd
->loop
.n1
, t
);
4275 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
4276 e
= force_gimple_operand_gsi (&gsi
, t
, true, NULL_TREE
,
4277 false, GSI_CONTINUE_LINKING
);
4279 /* The code controlling the sequential loop replaces the
4280 GIMPLE_OMP_CONTINUE. */
4281 gsi
= gsi_last_bb (cont_bb
);
4282 stmt
= gsi_stmt (gsi
);
4283 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
4284 vmain
= gimple_omp_continue_control_use (stmt
);
4285 vback
= gimple_omp_continue_control_def (stmt
);
4287 if (POINTER_TYPE_P (type
))
4288 t
= fold_build_pointer_plus (vmain
, fd
->loop
.step
);
4290 t
= fold_build2 (PLUS_EXPR
, type
, vmain
, fd
->loop
.step
);
4291 t
= force_gimple_operand_gsi (&gsi
, t
, false, NULL_TREE
,
4292 true, GSI_SAME_STMT
);
4293 stmt
= gimple_build_assign (vback
, t
);
4294 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
4296 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
, vback
, e
);
4297 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
4299 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4300 gsi_remove (&gsi
, true);
4302 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4303 gsi
= gsi_last_bb (exit_bb
);
4304 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
4305 force_gimple_operand_gsi (&gsi
, build_omp_barrier (), false, NULL_TREE
,
4306 false, GSI_SAME_STMT
);
4307 gsi_remove (&gsi
, true);
4309 /* Connect all the blocks. */
4310 ep
= make_edge (entry_bb
, third_bb
, EDGE_FALSE_VALUE
);
4311 ep
->probability
= REG_BR_PROB_BASE
/ 4 * 3;
4312 ep
= find_edge (entry_bb
, second_bb
);
4313 ep
->flags
= EDGE_TRUE_VALUE
;
4314 ep
->probability
= REG_BR_PROB_BASE
/ 4;
4315 find_edge (third_bb
, seq_start_bb
)->flags
= EDGE_FALSE_VALUE
;
4316 find_edge (third_bb
, fin_bb
)->flags
= EDGE_TRUE_VALUE
;
4318 find_edge (cont_bb
, body_bb
)->flags
= EDGE_TRUE_VALUE
;
4319 find_edge (cont_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
4321 set_immediate_dominator (CDI_DOMINATORS
, second_bb
, entry_bb
);
4322 set_immediate_dominator (CDI_DOMINATORS
, third_bb
, entry_bb
);
4323 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
, third_bb
);
4324 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
4325 recompute_dominator (CDI_DOMINATORS
, body_bb
));
4326 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
4327 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/
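/* Illustration (not part of the original comment): with CHUNK = 2,
   nthreads = 2 and n = 10, thread 0 executes trips 0, 1 and 2 covering
   iterations [0,2), [4,6) and [8,10), while thread 1 executes trips 0 and 1
   covering [2,4) and [6,8).  The trip counter is what rotates the chunks
   round-robin over the team without any runtime bookkeeping.  */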
static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
	      == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
				true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				true, GSI_SAME_STMT);

  trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				      true, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (v_main, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi, nphi;
      edge_var_map_vector head;
      edge_var_map *vm;
      edge re, ene;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
	 remove arguments of the phi nodes in fin_bb.  We need to create
	 appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
	   gsi_next (&psi), ++i)
	{
	  source_location locus;

	  phi = gsi_stmt (psi);
	  t = gimple_phi_result (phi);
	  gcc_assert (t == redirect_edge_var_map_result (vm));
	  nphi = create_phi_node (t, iter_part_bb);
	  SSA_NAME_DEF_STMT (t) = nphi;

	  t = PHI_ARG_DEF_FROM_EDGE (phi, se);
	  locus = gimple_phi_arg_location_from_edge (phi, se);

	  /* A special case -- fd->loop.v is not yet computed in
	     iter_part_bb, we need to use v_extra instead.  */
	  if (t == fd->loop.v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene, locus);
	  locus = redirect_edge_var_map_location (vm);
	  add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
	}
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
      redirect_edge_var_map_clear (re);
      while (1)
	{
	  psi = gsi_start_phis (fin_bb);
	  if (gsi_end_p (psi))
	    break;
	  remove_phi_node (&psi, false);
	}

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
		   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
		   UNKNOWN_LOCATION);
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
			   recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
			   recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
}
/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
		 ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		       - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix);
    }

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
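/* Illustration (not part of the original comment), assuming the libgomp
   entry points behave as described above: given

	#pragma omp sections
	{
	  #pragma omp section
	    foo ();
	  #pragma omp section
	    bar ();
	}

   each thread repeatedly obtains a section number from the runtime and
   dispatches on it; foo () becomes "case 1", bar () becomes "case 2", and a
   return value of 0 sends the thread to the exit label.  In the combined
   "parallel sections" form the work share is already registered when the
   team starts, which is why the entry call is GOMP_sections_next instead of
   GOMP_sections_start below.  */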
static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
	l2 = gimple_block_label (l2_bb);
      else
	{
	  /* This can happen if there are reductions.  */
	  len = EDGE_COUNT (l0_bb->succs);
	  gcc_assert (len > 0);
	  e = EDGE_SUCC (l0_bb, len - 1);
	  si = gsi_last_bb (e->dest);
	  if (gsi_end_p (si)
	      || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	    l2 = gimple_block_label (e->dest);
	  else
	    FOR_EACH_EDGE (e, ei, l0_bb->succs)
	      {
		si = gsi_last_bb (e->dest);
		if (gsi_end_p (si)
		    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
		  {
		    l2 = gimple_block_label (e->dest);
		    break;
		  }
	      }
	}
      default_bb = create_empty_bb (l1_bb->prev_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l2 = gimple_block_label (default_bb);
    }

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
			 exit_reachable ? len - 1 : len);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  i = 0;
  if (exit_reachable)
    {
      t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      VEC_quick_push (tree, label_vec, u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);

  stmt = gimple_build_switch_vec (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  VEC_free (tree, heap, label_vec);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
      si = gsi_last_bb (l2_bb);
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
	t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
      else
	t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
      stmt = gimple_build_call (t, 0);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  */
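/* Illustration (not part of the original sources): for

	#pragma omp atomic read
	  v = *p;

   with a 4-byte type, the code below emits the equivalent of

	v = __atomic_load_4 (p, MEMMODEL_RELAXED);

   relying on the target to provide the sized __atomic_load_N builtin; if
   builtin_decl_explicit returns NULL_TREE we give up here and let one of
   the other expansion strategies handle the statement.  */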
static bool
expand_omp_atomic_load (basic_block load_bb, tree addr,
			tree loaded_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb;
  location_t loc;
  gimple stmt;
  tree decl, call, type, itype;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_load_optab[mode], and mode
     is smaller than word size, then expand_atomic_load assumes that the load
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (loaded_val);
  itype = TREE_TYPE (TREE_TYPE (decl));

  call = build_call_expr_loc (loc, decl, 2, addr,
			      build_int_cst (NULL, MEMMODEL_RELAXED));
  if (!useless_type_conversion_p (type, itype))
    call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
  call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  store_bb = single_succ (load_bb);
  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  */

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
			 tree loaded_val, tree stored_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb = single_succ (load_bb);
  location_t loc;
  gimple stmt;
  tree decl, call, type, itype;
  enum machine_mode imode;
  bool exchange;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);

  /* If the load value is needed, then this isn't a store but an exchange.  */
  exchange = gimple_omp_atomic_need_value_p (stmt);

  gsi = gsi_last_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_store_optab[mode], and mode
     is smaller than word size, then expand_atomic_store assumes that the store
     is atomic.  We could avoid the builtin entirely in this case.  */

  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (stored_val);

  /* Dig out the type of the function's second argument.  */
  itype = TREE_TYPE (decl);
  itype = TYPE_ARG_TYPES (itype);
  itype = TREE_CHAIN (itype);
  itype = TREE_VALUE (itype);
  imode = TYPE_MODE (itype);

  if (exchange && !can_atomic_exchange_p (imode, true))
    return false;

  if (!useless_type_conversion_p (itype, type))
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
			      build_int_cst (NULL, MEMMODEL_RELAXED));
  if (exchange)
    {
      if (!useless_type_conversion_p (type, itype))
	call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
    }

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
  gsi = gsi_last_bb (load_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */
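/* Illustration (not part of the original sources): an update such as

	#pragma omp atomic
	  counter = counter + 5;

   on a 4-byte integer matches the PLUS_EXPR case below and is rewritten as
   the equivalent of

	__atomic_fetch_add_4 (&counter, 5, MEMMODEL_RELAXED);

   whereas "counter = 5 - counter" does not have the loaded value as the
   first operand of a commutative operation, so this routine returns false
   and the caller falls back to the compare-and-swap loop.  */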
static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
			    tree addr, tree loaded_val,
			    tree stored_val, int index)
{
  enum built_in_function oldbase, newbase, tmpbase;
  tree decl, itype, call;
  tree lhs, rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;
  enum tree_code code;
  bool need_old, need_new;
  enum machine_mode imode;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
  gcc_checking_assert (!need_old || !need_new);

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  code = gimple_assign_rhs_code (stmt);
  switch (code)
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
      newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
      break;
    case MINUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
      newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
      break;
    case BIT_AND_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
      newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
      break;
    case BIT_IOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
      newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
      break;
    case BIT_XOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
      newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  tmpbase = ((enum built_in_function)
	     ((need_new ? newbase : oldbase) + index + 1));
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));
  imode = TYPE_MODE (itype);

  /* We could test all of the various optabs involved, but the fact of the
     matter is that (with the exception of i486 vs i586 and xadd) all targets
     that support any atomic operaton optab also implements compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
  if (!can_compare_and_swap_p (imode, true))
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* OpenMP does not imply any barrier-like semantics on its atomic ops.
     It only requires that the operation happen atomically.  Thus we can
     use the RELAXED memory model.  */
  call = build_call_expr_loc (loc, decl, 3, addr,
			      fold_convert_loc (loc, itype, rhs),
			      build_int_cst (NULL, MEMMODEL_RELAXED));

  if (need_old || need_new)
    {
      lhs = need_old ? loaded_val : stored_val;
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
    }
  else
    call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */
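/* Illustration (not part of the original sources): for

	#pragma omp atomic
	  x = x + 1.0f;

   where x is a 4-byte float, there is no fetch-and-add builtin for
   floating-point types, so the loop above is used instead: the float is
   view-converted to a same-sized integer, the new value is computed, and
   __sync_val_compare_and_swap_4 retries until the word at &x is observed
   unchanged between the load and the swap.  */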
static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;
  enum built_in_function fncode;

  /* ??? We need a non-pointer interface to __atomic_compare_exchange in
     order to use the RELAXED memory model effectively.  */
  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
				    + index + 1);
  cmpxchg = builtin_decl_explicit (fncode);
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
							   true), NULL);
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
	{
	  add_referenced_var (iaddr);
	  add_referenced_var (loadedi);
	  loadedi = make_ssa_name (loadedi, NULL);
	}
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial
    = force_gimple_operand_gsi (&si,
				build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
					iaddr,
					build_int_cst (TREE_TYPE (iaddr), 0)),
				true, NULL_TREE, true, GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  gimple stmt;
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
				build1 (VIEW_CONVERT_EXPR, itype,
					stored_val), true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
      if (gimple_in_ssa_p (cfun))
	add_referenced_var (old_vali);
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
    (build2 (NE_EXPR, boolean_type_node,
	     new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

			GOMP_atomic_start ();
			*addr = rhs;
			GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */
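/* Illustration (not part of the original sources): after this fallback, an
   atomic update of a type with no suitable compare-and-swap ends up as

	GOMP_atomic_start ();
	loaded_val = *addr;
	... statements computing stored_val from loaded_val ...
	*addr = stored_val;
	GOMP_atomic_end ();

   so every such update in the program serializes on the single global lock
   inside libgomp, which is exactly the "works only between atomic
   directives" caveat stated above.  */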
static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}
/* Expand an GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */
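/* Illustration (not part of the original sources) of the resulting strategy
   ladder for "#pragma omp atomic", assuming the target provides the usual
   __atomic/__sync builtins: an "int" increment goes through
   expand_omp_atomic_fetch_op (an __atomic_fetch_add call), a "float" update
   fails the integral-type test there and is handled by the
   compare-and-swap loop, and a type whose size is not a power of two (or is
   wider than 16 bytes) fails the index check below and falls through to the
   GOMP_atomic_start/GOMP_atomic_end mutex.  */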
static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      /* ??? Assume BIGGEST_ALIGNMENT *is* aligned.  */
      if (exact_log2 (align) >= index
	  || align * BITS_PER_UNIT >= BIGGEST_ALIGNMENT)
	{
	  if (loaded_val == stored_val
	      && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
		  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
	    return;

	  if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	       || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && store_bb == single_succ (load_bb)
	      && first_stmt (store_bb) == store
	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
					  stored_val, index))
	    return;

	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb)
	      && expand_omp_atomic_fetch_op (load_bb, addr,
					     loaded_val, stored_val, index))
	    return;

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->inner)
	expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analoguous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	  ;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}

/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}

struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body, body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx);

  tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;

  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      gimple_seq_add_stmt (&body, sec_start);

      lower_omp (gimple_omp_body (sec_start), sctx);
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
      gimple_omp_set_body (sec_start, NULL);

      if (i == len - 1)
	{
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
	  gimple_seq_add_seq (&body, l);
	  gimple_omp_section_set_last (sec_start);
	}

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_sections_clauses (stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
  gimple_omp_set_body (stmt, NULL);

  gsi_replace (gsi_p, new_stmt, true);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, bfn_decl;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
  t = build_call_expr_loc (loc, bfn_decl, 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  bind_body = dlist = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, bind_body, block);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x, bfn_decl;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  x = build_call_expr_loc (loc, bfn_decl, 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
}
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
                            block);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
                         0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
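/* Sketch of the result (illustrative only):

	GOMP_ordered_start ();
	<ordered body>;
	GOMP_ordered_end ();
	GIMPLE_OMP_RETURN (nowait == true);
*/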
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   it requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
      unlock = build_call_expr_loc (loc, unlock, 1,
				    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
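/* Sketch of the result (illustrative only).  For "#pragma omp critical (foo)"
   the region becomes

	GOMP_critical_name_start (&.gomp_critical_user_foo);
	<critical body>;
	GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common symbol created above; the
   unnamed form uses GOMP_critical_start ()/GOMP_critical_end () instead.  */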
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->loop.n2, 0)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
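/* As an illustrative example (variable names made up): for
   "#pragma omp for lastprivate (x)" over "for (i = 0; i < n; i++)",
   the sequence prepended to *DLIST is roughly

	if (i == n)        <- !(i < n), strengthened to == because step is 1
	  x = <thread-private copy of x>;

   while *BODY_P receives "i = 0;" so a thread that runs no iterations
   cannot satisfy the guard by accident.  */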
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
  lower_omp (gimple_omp_body (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  /* Move declaration of temporaries in the loop body before we make
     it go away entirely.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
  gsi_replace (gsi_p, new_stmt, true);
}
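/* After this routine the statement sequence for the loop directive is laid
   out approximately as follows (illustrative outline only):

	<setup from lower_rec_input_clauses>
	<pre-body>
	<lastprivate guard: initialization of the iteration variable>
	GIMPLE_OMP_FOR    (bounds/step now reference .omp_data_i if needed)
	  <loop body>
	GIMPLE_OMP_CONTINUE (V, V)
	<reduction merges, lastprivate copy-out, destructor list>
	GIMPLE_OMP_RETURN (nowait flag)

   The actual loop control flow and libgomp scheduling calls are generated
   later by pass_expand_omp.  */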
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
			 bool *handled_ops_p,
			 struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}
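/* Used from lower_omp_taskreg: walking a parallel body with this callback
   leaves the counter at 1 only when the body is exactly one workshare, e.g.
   (illustrative only)

	#pragma omp parallel
	#pragma omp for
	for (i = 0; i < n; i++) ...

   in which case the parallel is marked combined, allowing expansion to use
   the combined libgomp entry points such as GOMP_parallel_loop_static_start.  */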
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);
  else
    return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}
/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}
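/* In outline, the generated copy function has the following shape
   (illustrative only; the argument names are made up):

	void task_copyfn (struct .omp_data_s *dst, struct .omp_data_s *src)
	{
	  <recompute remapped VLA size temporaries from *src>
	  <dst->field = src->field for shared variable pointers>
	  <copy-construct non-VLA firstprivate fields>
	  <copy-construct VLA firstprivate data and store its address>
	  return;
	}

   GOMP_task invokes it to move data from the encountering thread's record
   into the record that is later passed to the deferred task body.  */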
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);

  gimple_seq_add_stmt (&ilist, bind);
  gimple_seq_add_seq (&ilist, olist);
  bind = gimple_build_bind (NULL, ilist, NULL);

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
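/* Sketch of the result for a parallel (illustrative only):

	<ilist: .omp_data_o.x = x; ... built by lower_send_*>
	GIMPLE_OMP_PARALLEL (child fn, data argument .omp_data_o)
	  {
	    .omp_data_i = &.omp_data_o;   (receiver_decl fixup, if any)
	    <par_ilist>  <lowered parallel body>  <par_olist>
	    GIMPLE_OMP_RETURN
	  }
	<olist: copy-back of values sent by address>

   pass_expand_omp later outlines the region into the child function and
   emits the GOMP_parallel_start/GOMP_parallel_end calls.  */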
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}
static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi = gsi_start (body);
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}
struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;
/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
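/* For instance (illustrative only), the second pass below ends up calling
   this for the GIMPLE_GOTO in

	goto inside;
	#pragma omp parallel
	{
	 inside: ;
	}

   with BRANCH_CTX == NULL and LABEL_CTX == the GIMPLE_OMP_PARALLEL, so the
   specific "invalid entry to OpenMP structured block" error is emitted.  */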
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
	{
	  tree lab = gimple_cond_true_label (stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple) n->value : NULL);
	    }
	  lab = gimple_cond_false_label (stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple) n->value : NULL);
	    }
	}
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}
static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}
struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};
#include "gt-omp-low.h"