/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
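/* Illustrative sketch only (not code from this pass): conceptually, a
   parallel region such as

	#pragma omp parallel shared (x)
	  x++;

   is lowered into an outlined child function plus explicit libgomp calls
   roughly of the form

	.omp_data_o.x = x;
	GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();
	x = .omp_data_o.x;

   where .omp_data_o, .omp_data_s and foo._omp_fn.0 correspond to the
   artificial decls created below; the actual call sequence is produced
   later, by pass_expand_omp.  */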
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

static tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}

/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
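/* Worked example of the collapse count computation above (illustrative,
   not part of the original source): for a loop nest

	#pragma omp for collapse(2)
	for (i = 0; i < 10; i += 3)
	  for (j = 0; j < 4; j++)

   the per-loop expression is (step + (cond == LT ? -1 : 1) + n2 - n1) / step,
   i.e. (3 - 1 + 10 - 0) / 3 = 4 for the outer loop and
   (1 - 1 + 4 - 0) / 1 = 4 for the inner one, so the combined logical
   iteration count stored in *COLLAPSE_COUNT is 16.  */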
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */
static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
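/* Illustrative note (an assumption about the libgomp interface, not taken
   from this file): for a combined parallel loop the extra arguments
   collected above become the start, end, increment and chunk-size operands
   of the combined runtime entry point, so e.g. a schedule(dynamic,4) loop
   over [0,100) with step 1 would eventually pass 0, 100, 1 and 4 alongside
   the outlined child function.  */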
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
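/* Illustrative examples (not from the original source) of the policy above:
   an aggregate such as "int a[100]" is always passed by pointer; a scalar
   "int x" that is TREE_ADDRESSABLE, static, or already shared by an
   enclosing parallel is likewise passed by pointer; a plain local scalar
   shared with a single parallel region may instead use copy-in/copy-out.  */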
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */

static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}
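/* Illustrative sketch (not from the original source): for a variable X
   mapped into the child as a by-value field, the reference built above is
   conceptually .omp_data_i->X; for a by-reference field it is dereferenced
   once more, i.e. *(.omp_data_i->X).  */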
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
maybe_catch_exception (gimple_seq
);
1246 /* Finalize task copyfn. */
1249 finalize_task_copyfn (gimple task_stmt
)
1251 struct function
*child_cfun
;
1253 gimple_seq seq
= NULL
, new_seq
;
1256 child_fn
= gimple_omp_task_copy_fn (task_stmt
);
1257 if (child_fn
== NULL_TREE
)
1260 child_cfun
= DECL_STRUCT_FUNCTION (child_fn
);
1262 /* Inform the callgraph about the new function. */
1263 DECL_STRUCT_FUNCTION (child_fn
)->curr_properties
1264 = cfun
->curr_properties
& ~PROP_loops
;
1266 push_cfun (child_cfun
);
1267 bind
= gimplify_body (child_fn
, false);
1268 gimple_seq_add_stmt (&seq
, bind
);
1269 new_seq
= maybe_catch_exception (seq
);
1272 bind
= gimple_build_bind (NULL
, new_seq
, NULL
);
1274 gimple_seq_add_stmt (&seq
, bind
);
1276 gimple_set_body (child_fn
, seq
);
1279 cgraph_add_new_function (child_fn
, false);
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
			    TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions.  */

static bool
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		error_at (gimple_location (stmt),
			  "barrier region may not be closely nested inside "
			  "of work-sharing, critical, ordered, master or "
			  "explicit task region");
		return false;
	      }
	    error_at (gimple_location (stmt),
		      "work-sharing region may not be closely nested inside "
		      "of work-sharing, critical, ordered, master or explicit "
		      "task region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    error_at (gimple_location (stmt),
		      "master region may not be closely nested inside "
		      "of work-sharing or explicit task region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    error_at (gimple_location (stmt),
		      "ordered region may not be closely nested inside "
		      "of critical or explicit task region");
	    return false;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      {
		error_at (gimple_location (stmt),
			  "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
		return false;
	      }
	    return true;
	  case GIMPLE_OMP_PARALLEL:
	    return true;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    error_at (gimple_location (stmt),
		      "critical region may not be nested inside a critical "
		      "region with the same name");
	    return false;
	  }
      break;
    default:
      break;
    }
  return true;
}
/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = build_int_cst_wide (tem,
					      TREE_INT_CST_LOW (t),
					      TREE_INT_CST_HIGH (t));
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}
/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      bool remove = false;
      if (is_gimple_omp (stmt))
	remove = !check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    remove = !check_omp_nesting_restrictions (stmt, ctx);
	}
      if (remove)
	{
	  stmt = gimple_build_nop ();
	  gsi_replace (gsi, stmt, false);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var; var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;

    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel	.omp_data_s.1.i -> iD.1562
	inner parallel	.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
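
/* For orientation only (not part of the lowering itself): the values
   chosen above mean that, e.g., reduction(+:x) starts each private copy
   at 0, reduction(*:x) at 1, reduction(&:x) at ~0, and reduction(max:x)
   at the smallest value of the type (or -Inf when the type honors
   infinities), so merging a partial result is a no-op for threads that
   never touch their copy.  */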
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
			 omp_context *ctx)
{
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;
	  location_t clause_loc = OMP_CLAUSE_LOCATION (c);

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	      break;
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;

	      if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
		{
		  gimple stmt;
		  tree tmp, atmp;

		  ptr = DECL_VALUE_EXPR (new_var);
		  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
		  ptr = TREE_OPERAND (ptr, 0);
		  gcc_assert (DECL_P (ptr));
		  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

		  /* void *tmp = __builtin_alloca */
		  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
		  stmt = gimple_build_call (atmp, 1, x);
		  tmp = create_tmp_var_raw (ptr_type_node, NULL);
		  gimple_add_tmp_var (tmp);
		  gimple_call_set_lhs (stmt, tmp);

		  gimple_seq_add_stmt (ilist, stmt);

		  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
		  gimplify_assign (ptr, x, ilist);
		}
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;

	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
		{
		  x = build_receiver_ref (var, false, ctx);
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else if (TREE_CONSTANT (x))
		{
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  TREE_ADDRESSABLE (x) = 1;
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else
		{
		  tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
		  x = build_call_expr_loc (clause_loc, atmp, 1, x);
		}

	      x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
	      gimplify_assign (new_var, x, ilist);

	      new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, ctx);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;

	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
		x = build_outer_var_ref (var, ctx);
	      else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
		{
		  if (is_task_ctx (ctx))
		    x = build_receiver_ref (var, false, ctx);
		  else
		    x = build_outer_var_ref (var, ctx);
		}
	      else
		x = NULL;
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  gimple_seq tseq = NULL;

		  dtor = x;
		  gimplify_stmt (&dtor, &tseq);
		  gimple_seq_add_seq (dlist, tseq);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      if (is_task_ctx (ctx))
		{
		  if (is_reference (var) || is_variable_sized (var))
		    goto do_dtor;
		  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
									  ctx))
			   || use_pointer_for_field (var, NULL))
		    {
		      x = build_receiver_ref (var, false, ctx);
		      SET_DECL_VALUE_EXPR (new_var, x);
		      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
		      goto do_dtor;
		    }
		}
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;

	    case OMP_CLAUSE_COPYIN:
	      by_ref = use_pointer_for_field (var, NULL);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
		  x = build_outer_var_ref (var, ctx);

		  if (is_reference (var))
		    x = build_fold_addr_expr_loc (clause_loc, x);
		  SET_DECL_VALUE_EXPR (placeholder, x);
		  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
		  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
		  gimple_seq_add_seq (ilist,
				      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
		  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
		  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
		}
	      else
		{
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  gimplify_assign (new_var, x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
			   0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
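
/* A rough illustration of the receiver-side output built above (names
   are invented for the example; the real fields come from the context's
   record_type):

	#pragma omp parallel firstprivate (a) copyin (t)

   yields, at the start of the child body, approximately

	a.1 = .omp_data_i->a;			<- copy ctor for firstprivate
	if (__builtin_omp_get_thread_num () != 0)
	  t = .omp_data_i->t;			<- copyin, skipped on master
	GOMP_barrier ();	<- only when a copyin is passed by reference

   with destructor calls, if any, collected separately in DLIST.  */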
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
			   omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
      par_clauses = true;
    }

  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
				label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
	{
	  var = OMP_CLAUSE_DECL (c);
	  new_var = lookup_decl (var, ctx);

	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    {
	      lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (stmt_list,
				  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	    }
	  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;

	  x = build_outer_var_ref (var, ctx);
	  if (is_reference (var))
	    new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	  x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
	  gimplify_and_add (x, stmt_list);
	}
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
	{
	  /* If this was a workshare clause, see if it had been combined
	     with its parallel.  In that case, continue looking for the
	     clauses also on the parallel statement itself.  */
	  if (is_parallel_ctx (ctx))
	    break;

	  ctx = ctx->outer;
	  if (ctx == NULL || !is_parallel_ctx (ctx))
	    break;

	  c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
			       OMP_CLAUSE_LASTPRIVATE);
	  par_clauses = true;
	}
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
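
/* For illustration only: when PREDICATE is, say, "iv == last_iter", the
   sequence appended above amounts to

	if (iv == last_iter) goto label_true; else goto label;
     label_true:
	orig_var = private_var;	  <- one copy-out per lastprivate clause
     label:

   so only the thread that ran the sequentially last iteration writes its
   private value back to the original variable.  */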
/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
	new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_seqp);
	  return;
	}

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var))
	    ref = build_fold_addr_expr_loc (clause_loc, ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  gimplify_assign (ref, x, &sub_seq);
	}
    }

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
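
/* For illustration only: a single reduction(+:s) clause is merged with a
   lone atomic update,

	#pragma omp atomic
	  s = s + s_private;

   while two or more clauses (or an array/placeholder reduction) share one
   lock:

	GOMP_atomic_start ();
	s = s + s_private;
	p = p * p_private;
	GOMP_atomic_end ();  */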
/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
	{
	  x = build_fold_addr_expr_loc (clause_loc, new_var);
	  x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
	}
      gimplify_assign (ref, x, slist);

      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
	{
	  ref = fold_convert_loc (clause_loc,
				  build_pointer_type (TREE_TYPE (new_var)),
				  ref);
	  ref = build_fold_indirect_ref_loc (clause_loc, ref);
	}
      if (is_reference (var))
	{
	  ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
	  ref = build_simple_mem_ref_loc (clause_loc, ref);
	  new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}
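
/* For illustration only: for "#pragma omp single copyprivate (x)" the
   thread that executed the single region stores X (or its address, when
   passed by reference) into the broadcast record via SLIST, and the other
   threads copy it back out via RLIST once libgomp's single-copy hand-off
   (GOMP_single_copy_start/GOMP_single_copy_end) gives them the pointer.  */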
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    break;
	  continue;
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    {
	      do_out = true;
	      if (lang_hooks.decls.omp_private_outer_ref (val))
		do_in = true;
	    }
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
	  gimplify_assign (ref, x, ilist);
	  if (is_task_ctx (ctx))
	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  gimplify_assign (var, ref, olist);
	}
    }
}
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive, find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
	{
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	}
      else
	{
	  x = build_sender_ref (ovar, ctx);
	  gimplify_assign (x, var, ilist);

	  if (!TREE_READONLY (var)
	      /* We don't need to receive a new reference to a result
		 or parm decl.  In fact we may not store to it as we will
		 invalidate any pending RSO and generate wrong gimple
		 during typegimplification.  */
	      && !((TREE_CODE (var) == RESULT_DECL
		    || TREE_CODE (var) == PARM_DECL)
		   && DECL_BY_REFERENCE (var)))
	    {
	      x = build_sender_ref (ovar, ctx);
	      gimplify_assign (var, x, olist);
	    }
	}
    }
}
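
/* For illustration only (field name invented for the example): for

	int i;
	#pragma omp parallel shared (i)

   the sender side built above stores the variable (or its address, for
   by-reference fields) into the communication record before the fork and
   copies it back afterwards:

	.omp_data_o.i = i;	<- ILIST, before GOMP_parallel_start
	  ...parallel region...
	i = .omp_data_o.i;	<- OLIST, after GOMP_parallel_end  */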
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gimple entry_stmt, vec<tree, va_gc> *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  vec<tree, va_gc> *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
		       + (region->inner->sched_kind
			  == OMP_CLAUSE_SCHEDULE_RUNTIME
			  ? 3 : region->inner->sched_kind));
	  start_ix = (enum built_in_function)start_ix2;
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      gimple_stmt_iterator gsi;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var, NULL);
	      tmp_else = make_ssa_name (tmp_var, NULL);
	      tmp_join = make_ssa_name (tmp_var, NULL);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  stmt = gimple_build_assign (tmp_then, val);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (else_bb);
	  stmt = gimple_build_assign
	    (tmp_else, build_int_cst (unsigned_type_node, 1));
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      gimple phi = create_phi_node (tmp_join, bb);
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  vec_alloc (args, 3 + vec_safe_length (ws_args));
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (val);
  if (ws_args)
    args->splice (*ws_args);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       builtin_decl_explicit (start_ix), args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr_loc (gimple_location (entry_stmt),
			   gimple_omp_parallel_child_fn (entry_stmt), 1, t);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  t = build_call_expr_loc (gimple_location (entry_stmt),
			   builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
			   0);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
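
/* For illustration only: for a plain "#pragma omp parallel" the calls
   emitted above boil down to

	GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
	child_fn (&.omp_data_o);	<- the master runs the body too
	GOMP_parallel_end ();

   where num_threads is 0 (runtime choice), the NUM_THREADS expression,
   or the (cond ? val : 1u) value derived from an IF clause.  */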
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, c2, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
  flags = build_int_cst (unsigned_type_node,
			 (c ? 1 : 0) + (c2 ? 4 : 0));

  c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
  if (c)
    {
      c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
      c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
			   build_int_cst (unsigned_type_node, 2),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
		       7, t1, t2, t3,
		       gimple_omp_task_arg_size (entry_stmt),
		       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
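
/* For illustration only: the FLAGS argument built above packs the task
   clauses into a bitmask for GOMP_task:

	1 - the task is untied
	2 - added at run time when the FINAL clause evaluates to true
	4 - the task is mergeable

   and COND carries the IF clause (true when absent), letting the runtime
   decide whether to defer the task or execute it immediately.  */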
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (vec<tree, va_gc> *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      gimple parallel_stmt = last_stmt (region->entry);
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}
*region
)
3307 if (region
->type
== GIMPLE_OMP_PARALLEL
)
3308 remove_exit_barrier (region
);
3312 region
= region
->inner
;
3313 remove_exit_barriers (region
);
3314 while (region
->next
)
3316 region
= region
->next
;
3317 remove_exit_barriers (region
);
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple call = gsi_stmt (gsi);
	tree decl;

	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}
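
/* For illustration only: inside an outlined parallel body a call such as

	tid = omp_get_thread_num ();

   is redirected by the code above to the const builtin
   __builtin_omp_get_thread_num (), which later passes can then CSE or
   hoist, since the value cannot change within one parallel region.  */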
/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  gimple_stmt_iterator gsi;
	  tree arg, narg;
	  gimple parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      /* If we are in ssa form, we must load the value from the default
		 definition of the argument.  That should not be defined now,
		 since the argument is not used uninitialized.  */
	      gcc_assert (ssa_default_def (cfun, arg) == NULL);
	      narg = make_ssa_name (arg, gimple_build_nop ());
	      set_ssa_default_def (cfun, arg, narg);
	      /* ?? Is setting the subcode really necessary ??  */
	      gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
	      gimple_assign_set_rhs1 (parcopy_stmt, narg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (TREE_CODE (t) == VAR_DECL
	    && TREE_STATIC (t)
	    && !DECL_EXTERNAL (t))
	  varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
	= cfun->curr_properties & ~PROP_loops;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB (bb)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}
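
/* For illustration only (the child function name is invented): after this
   pass a directive such as

	#pragma omp parallel shared (n)
	  body;

   has become an ordinary call sequence in the parent,

	.omp_data_o.n = n;
	GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();

   plus a separate child function holding BODY, which receives the
   communication record as its only argument.  */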
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.

    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

      */

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gimple stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
	      || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      /* collapsed loops need work for expansion in SSA form.  */
      gcc_assert (!gimple_in_ssa_p (cfun));
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
      for (i = 0; i < fd->collapse; i++)
	{
	  tree itype = TREE_TYPE (fd->loops[i].v);

	  if (POINTER_TYPE_P (itype))
	    itype = signed_type_for (itype);
	  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				     ? -1 : 1));
	  t = fold_build2 (PLUS_EXPR, itype,
			   fold_convert (itype, fd->loops[i].step), t);
	  t = fold_build2 (PLUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n2));
	  t = fold_build2 (MINUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n1));
	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype,
					  fold_convert (itype,
							fd->loops[i].step)));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			     fold_convert (itype, fd->loops[i].step));
	  t = fold_convert (type, t);
	  if (TREE_CODE (t) == INTEGER_CST)
	    counts[i] = t;
	  else
	    {
	      counts[i] = create_tmp_reg (type, ".count");
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    true, GSI_SAME_STMT);
	      stmt = gimple_build_assign (counts[i], t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	  if (SSA_VAR_P (fd->loop.n2))
	    {
	      if (i == 0)
		t = counts[0];
	      else
		t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    true, GSI_SAME_STMT);
	      stmt = gimple_build_assign (fd->loop.n2, t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	}
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
	 GOMP_loop_foo_next.  */
      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      if (POINTER_TYPE_P (type)
	  && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
	{
	  /* Avoid casting pointers to integer of a different size.  */
	  tree itype = signed_type_for (type);
	  t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
	  t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
	}
      else
	{
	  t1 = fold_convert (fd->iter_type, fd->loop.n2);
	  t0 = fold_convert (fd->iter_type, fd->loop.n1);
	}
      if (bias)
	{
	  t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
	  t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
	}
      if (fd->iter_type == long_integer_type_node)
	{
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (builtin_decl_explicit (start_fn),
				   6, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 5, t0, t1, t2, t3, t4);
	}
      else
	{
	  tree t5;
	  tree c_bool_type;
	  tree bfn_decl;

	  /* The GOMP_loop_ull_*start functions have additional boolean
	     argument, true for < loops and false for > loops.
	     In Fortran, the C bool type can be different from
	     boolean_type_node.  */
	  bfn_decl = builtin_decl_explicit (start_fn);
	  c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
	  t5 = build_int_cst (c_bool_type,
			      fd->loop.cond_code == LT_EXPR ? 1 : 0);
	  if (fd->chunk_size)
	    {
	      tree bfn_decl = builtin_decl_explicit (start_fn);
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 6, t5, t0, t1, t2, t3, t4);
	}
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
		     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (signed_type_for (type), t);
  t = fold_convert (type, t);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (fd->loop.v)
				&& TREE_ADDRESSABLE (fd->loop.v),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (signed_type_for (type), t);
  t = fold_convert (type, t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				   false, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    {
      tree tem = create_tmp_reg (type, ".tem");
      stmt = gimple_build_assign (tem, fd->loop.v);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      for (i = fd->collapse - 1; i >= 0; i--)
	{
	  tree vtype = TREE_TYPE (fd->loops[i].v), itype;
	  itype = vtype;
	  if (POINTER_TYPE_P (vtype))
	    itype = signed_type_for (vtype);
	  t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
	  t = fold_convert (itype, t);
	  t = fold_build2 (MULT_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].step));
	  if (POINTER_TYPE_P (vtype))
	    t = fold_build_pointer_plus (fd->loops[i].n1, t);
	  else
	    t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (fd->loops[i].v)
					&& TREE_ADDRESSABLE (fd->loops[i].v),
					NULL_TREE, false,
					GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  if (i != 0)
	    {
	      t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
	 loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (stmt);
      vback = gimple_omp_continue_control_def (stmt);

      if (POINTER_TYPE_P (type))
	t = fold_build_pointer_plus (vmain, fd->loop.step);
      else
	t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (vback) && TREE_ADDRESSABLE (vback),
				    NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (vback, t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      t = build2 (fd->loop.cond_code, boolean_type_node,
		  DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
		  iend);
      stmt = gimple_build_cond_empty (t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1)
	{
	  basic_block last_bb, bb;

	  last_bb = cont_bb;
	  for (i = fd->collapse - 1; i >= 0; i--)
	    {
	      tree vtype = TREE_TYPE (fd->loops[i].v);

	      bb = create_empty_bb (last_bb);
	      gsi = gsi_start_bb (bb);

	      if (i < fd->collapse - 1)
		{
		  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
		  e->probability = REG_BR_PROB_BASE / 8;

		  t = fd->loops[i + 1].n1;
		  t = force_gimple_operand_gsi (&gsi, t,
						DECL_P (fd->loops[i + 1].v)
						&& TREE_ADDRESSABLE
						     (fd->loops[i + 1].v),
						NULL_TREE, false,
						GSI_CONTINUE_LINKING);
		  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		}
	      else
		collapse_bb = bb;

	      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

	      if (POINTER_TYPE_P (vtype))
		t = fold_build_pointer_plus (fd->loops[i].v,
					     fd->loops[i].step);
	      else
		t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
				 fd->loops[i].step);
	      t = force_gimple_operand_gsi (&gsi, t,
					    DECL_P (fd->loops[i].v)
					    && TREE_ADDRESSABLE (fd->loops[i].v),
					    NULL_TREE, false,
					    GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (fd->loops[i].v, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	      if (i > 0)
		{
		  t = fd->loops[i].n2;
		  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
						false, GSI_CONTINUE_LINKING);
		  tree v = fd->loops[i].v;
		  if (DECL_P (v) && TREE_ADDRESSABLE (v))
		    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
						  false, GSI_CONTINUE_LINKING);
		  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
				   v, t);
		  stmt = gimple_build_cond_empty (t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
		  e->probability = REG_BR_PROB_BASE * 7 / 8;
		}
	      else
		make_edge (bb, l1_bb, EDGE_FALLTHRU);
	      last_bb = bb;
	    }
	}

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
	t = fold_build2 (NE_EXPR, boolean_type_node,
			 t, build_int_cst (TREE_TYPE (t), 0));
      stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
		   PHI_ARG_DEF_FROM_EDGE (phi, e));
	}
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      if (fd->collapse > 1)
	{
	  e = find_edge (cont_bb, l1_bb);
	  remove_edge (e);
	  e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	{
	  e = find_edge (cont_bb, l1_bb);
	  e->flags = EDGE_TRUE_VALUE;
	}
      e->probability = REG_BR_PROB_BASE * 7 / 8;
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
			       recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
			       recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
			       recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
			       recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}
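
/* For illustration only: the BIAS computed near the top of
   expand_omp_for_generic handles signed 64-bit iteration variables that
   are driven through the unsigned GOMP_loop_ull_* interface.  Adding
   TYPE_MIN_VALUE (which wraps to 0x8000000000000000) maps the signed
   range monotonically onto the unsigned range, and the same bias is
   subtracted again when istart0/iend0 are copied back into the user's
   iteration variable.  */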
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	tt = n % nthreads;
	if (threadid < tt) goto L3; else goto L4;
    L3:
	tt = 0;
	q = q + 1;
    L4:
	s0 = q * threadid + tt;
	e0 = s0 + q;
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/

static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = signed_type_for (type);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  q = create_tmp_reg (itype, "q");
  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);

  tt = create_tmp_reg (itype, "tt");
  t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);

  t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
  stmt = gimple_build_cond_empty (t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  second_bb = split_block (entry_bb, stmt)->dest;
  gsi = gsi_last_bb (second_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
		     GSI_SAME_STMT);
  stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
				       build_int_cst (itype, 1));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  third_bb = split_block (second_bb, stmt)->dest;
  gsi = gsi_last_bb (third_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build2 (MULT_EXPR, itype, q, threadid);
  t = build2 (PLUS_EXPR, itype, t, tt);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (fd->loop.v)
				&& TREE_ADDRESSABLE (fd->loop.v),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (vmain, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (vback) && TREE_ADDRESSABLE (vback),
				NULL_TREE, true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node,
	      DECL_P (vback
) && TREE_ADDRESSABLE (vback
) ? t
: vback
, e
);
4331 gsi_insert_before (&gsi
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
4333 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4334 gsi_remove (&gsi
, true);
4336 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4337 gsi
= gsi_last_bb (exit_bb
);
4338 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi
)))
4339 force_gimple_operand_gsi (&gsi
, build_omp_barrier (), false, NULL_TREE
,
4340 false, GSI_SAME_STMT
);
4341 gsi_remove (&gsi
, true);
4343 /* Connect all the blocks. */
4344 ep
= make_edge (entry_bb
, third_bb
, EDGE_FALSE_VALUE
);
4345 ep
->probability
= REG_BR_PROB_BASE
/ 4 * 3;
4346 ep
= find_edge (entry_bb
, second_bb
);
4347 ep
->flags
= EDGE_TRUE_VALUE
;
4348 ep
->probability
= REG_BR_PROB_BASE
/ 4;
4349 find_edge (third_bb
, seq_start_bb
)->flags
= EDGE_FALSE_VALUE
;
4350 find_edge (third_bb
, fin_bb
)->flags
= EDGE_TRUE_VALUE
;
4352 find_edge (cont_bb
, body_bb
)->flags
= EDGE_TRUE_VALUE
;
4353 find_edge (cont_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
4355 set_immediate_dominator (CDI_DOMINATORS
, second_bb
, entry_bb
);
4356 set_immediate_dominator (CDI_DOMINATORS
, third_bb
, entry_bb
);
4357 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
, third_bb
);
4358 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
4359 recompute_dominator (CDI_DOMINATORS
, body_bb
));
4360 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
4361 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min (s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/
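/* As a rough illustration (hypothetical numbers): with schedule(static, 2),
   4 threads and n = 10 iterations, trip 0 gives thread T the chunk
   [2*T, 2*T + 2), i.e. [0,2), [2,4), [4,6) and [6,8); on trip 1 only
   thread 0 gets a chunk, [8,10), and the other threads see s0 >= n and
   fall through to L4.  */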
4403 expand_omp_for_static_chunk (struct omp_region
*region
, struct omp_for_data
*fd
)
4405 tree n
, s0
, e0
, e
, t
;
4406 tree trip_var
, trip_init
, trip_main
, trip_back
, nthreads
, threadid
;
4407 tree type
, itype
, v_main
, v_back
, v_extra
;
4408 basic_block entry_bb
, exit_bb
, body_bb
, seq_start_bb
, iter_part_bb
;
4409 basic_block trip_update_bb
, cont_bb
, fin_bb
;
4410 gimple_stmt_iterator si
;
4414 itype
= type
= TREE_TYPE (fd
->loop
.v
);
4415 if (POINTER_TYPE_P (type
))
4416 itype
= signed_type_for (type
);
4418 entry_bb
= region
->entry
;
4419 se
= split_block (entry_bb
, last_stmt (entry_bb
));
4421 iter_part_bb
= se
->dest
;
4422 cont_bb
= region
->cont
;
4423 gcc_assert (EDGE_COUNT (iter_part_bb
->succs
) == 2);
4424 gcc_assert (BRANCH_EDGE (iter_part_bb
)->dest
4425 == FALLTHRU_EDGE (cont_bb
)->dest
);
4426 seq_start_bb
= split_edge (FALLTHRU_EDGE (iter_part_bb
));
4427 body_bb
= single_succ (seq_start_bb
);
4428 gcc_assert (BRANCH_EDGE (cont_bb
)->dest
== body_bb
);
4429 gcc_assert (EDGE_COUNT (cont_bb
->succs
) == 2);
4430 fin_bb
= FALLTHRU_EDGE (cont_bb
)->dest
;
4431 trip_update_bb
= split_edge (FALLTHRU_EDGE (cont_bb
));
4432 exit_bb
= region
->exit
;
4434 /* Trip and adjustment setup goes in ENTRY_BB. */
4435 si
= gsi_last_bb (entry_bb
);
4436 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_FOR
);
4438 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS
), 0);
4439 t
= fold_convert (itype
, t
);
4440 nthreads
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4441 true, GSI_SAME_STMT
);
4443 t
= build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
), 0);
4444 t
= fold_convert (itype
, t
);
4445 threadid
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4446 true, GSI_SAME_STMT
);
4449 = force_gimple_operand_gsi (&si
, fold_convert (type
, fd
->loop
.n1
),
4450 true, NULL_TREE
, true, GSI_SAME_STMT
);
4452 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->loop
.n2
),
4453 true, NULL_TREE
, true, GSI_SAME_STMT
);
4455 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->loop
.step
),
4456 true, NULL_TREE
, true, GSI_SAME_STMT
);
4458 = force_gimple_operand_gsi (&si
, fold_convert (itype
, fd
->chunk_size
),
4459 true, NULL_TREE
, true, GSI_SAME_STMT
);
4461 t
= build_int_cst (itype
, (fd
->loop
.cond_code
== LT_EXPR
? -1 : 1));
4462 t
= fold_build2 (PLUS_EXPR
, itype
, fd
->loop
.step
, t
);
4463 t
= fold_build2 (PLUS_EXPR
, itype
, t
, fd
->loop
.n2
);
4464 t
= fold_build2 (MINUS_EXPR
, itype
, t
, fold_convert (itype
, fd
->loop
.n1
));
4465 if (TYPE_UNSIGNED (itype
) && fd
->loop
.cond_code
== GT_EXPR
)
4466 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
,
4467 fold_build1 (NEGATE_EXPR
, itype
, t
),
4468 fold_build1 (NEGATE_EXPR
, itype
, fd
->loop
.step
));
4470 t
= fold_build2 (TRUNC_DIV_EXPR
, itype
, t
, fd
->loop
.step
);
4471 t
= fold_convert (itype
, t
);
4472 n
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4473 true, GSI_SAME_STMT
);
4475 trip_var
= create_tmp_reg (itype
, ".trip");
4476 if (gimple_in_ssa_p (cfun
))
4478 trip_init
= make_ssa_name (trip_var
, NULL
);
4479 trip_main
= make_ssa_name (trip_var
, NULL
);
4480 trip_back
= make_ssa_name (trip_var
, NULL
);
4484 trip_init
= trip_var
;
4485 trip_main
= trip_var
;
4486 trip_back
= trip_var
;
4489 stmt
= gimple_build_assign (trip_init
, build_int_cst (itype
, 0));
4490 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
4492 t
= fold_build2 (MULT_EXPR
, itype
, threadid
, fd
->chunk_size
);
4493 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
4494 if (POINTER_TYPE_P (type
))
4495 t
= fold_build_pointer_plus (fd
->loop
.n1
, t
);
4497 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
4498 v_extra
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4499 true, GSI_SAME_STMT
);
4501 /* Remove the GIMPLE_OMP_FOR. */
4502 gsi_remove (&si
, true);
4504 /* Iteration space partitioning goes in ITER_PART_BB. */
4505 si
= gsi_last_bb (iter_part_bb
);
4507 t
= fold_build2 (MULT_EXPR
, itype
, trip_main
, nthreads
);
4508 t
= fold_build2 (PLUS_EXPR
, itype
, t
, threadid
);
4509 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->chunk_size
);
4510 s0
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4511 false, GSI_CONTINUE_LINKING
);
4513 t
= fold_build2 (PLUS_EXPR
, itype
, s0
, fd
->chunk_size
);
4514 t
= fold_build2 (MIN_EXPR
, itype
, t
, n
);
4515 e0
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4516 false, GSI_CONTINUE_LINKING
);
4518 t
= build2 (LT_EXPR
, boolean_type_node
, s0
, n
);
4519 gsi_insert_after (&si
, gimple_build_cond_empty (t
), GSI_CONTINUE_LINKING
);
4521 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4522 si
= gsi_start_bb (seq_start_bb
);
4524 t
= fold_convert (itype
, s0
);
4525 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
4526 if (POINTER_TYPE_P (type
))
4527 t
= fold_build_pointer_plus (fd
->loop
.n1
, t
);
4529 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
4530 t
= force_gimple_operand_gsi (&si
, t
,
4532 && TREE_ADDRESSABLE (fd
->loop
.v
),
4533 NULL_TREE
, false, GSI_CONTINUE_LINKING
);
4534 stmt
= gimple_build_assign (fd
->loop
.v
, t
);
4535 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
4537 t
= fold_convert (itype
, e0
);
4538 t
= fold_build2 (MULT_EXPR
, itype
, t
, fd
->loop
.step
);
4539 if (POINTER_TYPE_P (type
))
4540 t
= fold_build_pointer_plus (fd
->loop
.n1
, t
);
4542 t
= fold_build2 (PLUS_EXPR
, type
, t
, fd
->loop
.n1
);
4543 e
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4544 false, GSI_CONTINUE_LINKING
);
4546 /* The code controlling the sequential loop goes in CONT_BB,
4547 replacing the GIMPLE_OMP_CONTINUE. */
4548 si
= gsi_last_bb (cont_bb
);
4549 stmt
= gsi_stmt (si
);
4550 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_CONTINUE
);
4551 v_main
= gimple_omp_continue_control_use (stmt
);
4552 v_back
= gimple_omp_continue_control_def (stmt
);
4554 if (POINTER_TYPE_P (type
))
4555 t
= fold_build_pointer_plus (v_main
, fd
->loop
.step
);
4557 t
= fold_build2 (PLUS_EXPR
, type
, v_main
, fd
->loop
.step
);
4558 if (DECL_P (v_back
) && TREE_ADDRESSABLE (v_back
))
4559 t
= force_gimple_operand_gsi (&si
, t
, true, NULL_TREE
,
4560 true, GSI_SAME_STMT
);
4561 stmt
= gimple_build_assign (v_back
, t
);
4562 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
4564 t
= build2 (fd
->loop
.cond_code
, boolean_type_node
,
4565 DECL_P (v_back
) && TREE_ADDRESSABLE (v_back
)
4567 gsi_insert_before (&si
, gimple_build_cond_empty (t
), GSI_SAME_STMT
);
4569 /* Remove GIMPLE_OMP_CONTINUE. */
4570 gsi_remove (&si
, true);
4572 /* Trip update code goes into TRIP_UPDATE_BB. */
4573 si
= gsi_start_bb (trip_update_bb
);
4575 t
= build_int_cst (itype
, 1);
4576 t
= build2 (PLUS_EXPR
, itype
, trip_main
, t
);
4577 stmt
= gimple_build_assign (trip_back
, t
);
4578 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
4580 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4581 si
= gsi_last_bb (exit_bb
);
4582 if (!gimple_omp_return_nowait_p (gsi_stmt (si
)))
4583 force_gimple_operand_gsi (&si
, build_omp_barrier (), false, NULL_TREE
,
4584 false, GSI_SAME_STMT
);
4585 gsi_remove (&si
, true);
4587 /* Connect the new blocks. */
4588 find_edge (iter_part_bb
, seq_start_bb
)->flags
= EDGE_TRUE_VALUE
;
4589 find_edge (iter_part_bb
, fin_bb
)->flags
= EDGE_FALSE_VALUE
;
4591 find_edge (cont_bb
, body_bb
)->flags
= EDGE_TRUE_VALUE
;
4592 find_edge (cont_bb
, trip_update_bb
)->flags
= EDGE_FALSE_VALUE
;
4594 redirect_edge_and_branch (single_succ_edge (trip_update_bb
), iter_part_bb
);
4596 if (gimple_in_ssa_p (cfun
))
4598 gimple_stmt_iterator psi
;
4601 edge_var_map_vector
*head
;
4605 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4606 remove arguments of the phi nodes in fin_bb. We need to create
4607 appropriate phi nodes in iter_part_bb instead. */
4608 se
= single_pred_edge (fin_bb
);
4609 re
= single_succ_edge (trip_update_bb
);
4610 head
= redirect_edge_var_map_vector (re
);
4611 ene
= single_succ_edge (entry_bb
);
4613 psi
= gsi_start_phis (fin_bb
);
4614 for (i
= 0; !gsi_end_p (psi
) && head
->iterate (i
, &vm
);
4615 gsi_next (&psi
), ++i
)
4618 source_location locus
;
4620 phi
= gsi_stmt (psi
);
4621 t
= gimple_phi_result (phi
);
4622 gcc_assert (t
== redirect_edge_var_map_result (vm
));
4623 nphi
= create_phi_node (t
, iter_part_bb
);
4625 t
= PHI_ARG_DEF_FROM_EDGE (phi
, se
);
4626 locus
= gimple_phi_arg_location_from_edge (phi
, se
);
4628 /* A special case -- fd->loop.v is not yet computed in
4629 iter_part_bb, we need to use v_extra instead. */
4630 if (t
== fd
->loop
.v
)
4632 add_phi_arg (nphi
, t
, ene
, locus
);
4633 locus
= redirect_edge_var_map_location (vm
);
4634 add_phi_arg (nphi
, redirect_edge_var_map_def (vm
), re
, locus
);
4636 gcc_assert (!gsi_end_p (psi
) && i
== head
->length ());
4637 redirect_edge_var_map_clear (re
);
4640 psi
= gsi_start_phis (fin_bb
);
4641 if (gsi_end_p (psi
))
4643 remove_phi_node (&psi
, false);
4646 /* Make phi node for trip. */
4647 phi
= create_phi_node (trip_main
, iter_part_bb
);
4648 add_phi_arg (phi
, trip_back
, single_succ_edge (trip_update_bb
),
4650 add_phi_arg (phi
, trip_init
, single_succ_edge (entry_bb
),
4654 set_immediate_dominator (CDI_DOMINATORS
, trip_update_bb
, cont_bb
);
4655 set_immediate_dominator (CDI_DOMINATORS
, iter_part_bb
,
4656 recompute_dominator (CDI_DOMINATORS
, iter_part_bb
));
4657 set_immediate_dominator (CDI_DOMINATORS
, fin_bb
,
4658 recompute_dominator (CDI_DOMINATORS
, fin_bb
));
4659 set_immediate_dominator (CDI_DOMINATORS
, seq_start_bb
,
4660 recompute_dominator (CDI_DOMINATORS
, seq_start_bb
));
4661 set_immediate_dominator (CDI_DOMINATORS
, body_bb
,
4662 recompute_dominator (CDI_DOMINATORS
, body_bb
));
4666 /* Expand the OpenMP loop defined by REGION. */
4669 expand_omp_for (struct omp_region
*region
)
4671 struct omp_for_data fd
;
4672 struct omp_for_data_loop
*loops
;
4675 = (struct omp_for_data_loop
*)
4676 alloca (gimple_omp_for_collapse (last_stmt (region
->entry
))
4677 * sizeof (struct omp_for_data_loop
));
4678 extract_omp_for_data (last_stmt (region
->entry
), &fd
, loops
);
4679 region
->sched_kind
= fd
.sched_kind
;
4681 gcc_assert (EDGE_COUNT (region
->entry
->succs
) == 2);
4682 BRANCH_EDGE (region
->entry
)->flags
&= ~EDGE_ABNORMAL
;
4683 FALLTHRU_EDGE (region
->entry
)->flags
&= ~EDGE_ABNORMAL
;
4686 gcc_assert (EDGE_COUNT (region
->cont
->succs
) == 2);
4687 BRANCH_EDGE (region
->cont
)->flags
&= ~EDGE_ABNORMAL
;
4688 FALLTHRU_EDGE (region
->cont
)->flags
&= ~EDGE_ABNORMAL
;
4691 if (fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
4694 && region
->cont
!= NULL
)
4696 if (fd
.chunk_size
== NULL
)
4697 expand_omp_for_static_nochunk (region
, &fd
);
4699 expand_omp_for_static_chunk (region
, &fd
);
4703 int fn_index
, start_ix
, next_ix
;
4705 if (fd
.chunk_size
== NULL
4706 && fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_STATIC
)
4707 fd
.chunk_size
= integer_zero_node
;
4708 gcc_assert (fd
.sched_kind
!= OMP_CLAUSE_SCHEDULE_AUTO
);
4709 fn_index
= (fd
.sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
)
4710 ? 3 : fd
.sched_kind
;
4711 fn_index
+= fd
.have_ordered
* 4;
4712 start_ix
= ((int)BUILT_IN_GOMP_LOOP_STATIC_START
) + fn_index
;
4713 next_ix
= ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT
) + fn_index
;
4714 if (fd
.iter_type
== long_long_unsigned_type_node
)
4716 start_ix
+= ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4717 - (int)BUILT_IN_GOMP_LOOP_STATIC_START
);
4718 next_ix
+= ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4719 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT
);
4721 expand_omp_for_generic (region
, &fd
, (enum built_in_function
) start_ix
,
4722 (enum built_in_function
) next_ix
);
4725 if (gimple_in_ssa_p (cfun
))
4726 update_ssa (TODO_update_ssa_only_virtuals
);
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
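/* For example (hypothetical source, not from the original comments), a
   directive such as

	#pragma omp sections
	{
	#pragma omp section
	  a ();
	#pragma omp section
	  b ();
	}

   becomes a loop whose body is a switch over v: case 1 runs a (), case 2
   runs b (), case 0 exits, and each thread keeps calling
   GOMP_sections_next () until no work remains.  */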
4758 expand_omp_sections (struct omp_region
*region
)
4760 tree t
, u
, vin
= NULL
, vmain
, vnext
, l2
;
4761 vec
<tree
> label_vec
;
4763 basic_block entry_bb
, l0_bb
, l1_bb
, l2_bb
, default_bb
;
4764 gimple_stmt_iterator si
, switch_si
;
4765 gimple sections_stmt
, stmt
, cont
;
4768 struct omp_region
*inner
;
4770 bool exit_reachable
= region
->cont
!= NULL
;
4772 gcc_assert (region
->exit
!= NULL
);
4773 entry_bb
= region
->entry
;
4774 l0_bb
= single_succ (entry_bb
);
4775 l1_bb
= region
->cont
;
4776 l2_bb
= region
->exit
;
4777 if (single_pred_p (l2_bb
) && single_pred (l2_bb
) == l0_bb
)
4778 l2
= gimple_block_label (l2_bb
);
4781 /* This can happen if there are reductions. */
4782 len
= EDGE_COUNT (l0_bb
->succs
);
4783 gcc_assert (len
> 0);
4784 e
= EDGE_SUCC (l0_bb
, len
- 1);
4785 si
= gsi_last_bb (e
->dest
);
4788 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
4789 l2
= gimple_block_label (e
->dest
);
4791 FOR_EACH_EDGE (e
, ei
, l0_bb
->succs
)
4793 si
= gsi_last_bb (e
->dest
);
4795 || gimple_code (gsi_stmt (si
)) != GIMPLE_OMP_SECTION
)
4797 l2
= gimple_block_label (e
->dest
);
4803 default_bb
= create_empty_bb (l1_bb
->prev_bb
);
4805 default_bb
= create_empty_bb (l0_bb
);
4807 /* We will build a switch() with enough cases for all the
4808 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4809 and a default case to abort if something goes wrong. */
4810 len
= EDGE_COUNT (l0_bb
->succs
);
4812 /* Use vec::quick_push on label_vec throughout, since we know the size
4814 label_vec
.create (len
);
4816 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4817 GIMPLE_OMP_SECTIONS statement. */
4818 si
= gsi_last_bb (entry_bb
);
4819 sections_stmt
= gsi_stmt (si
);
4820 gcc_assert (gimple_code (sections_stmt
) == GIMPLE_OMP_SECTIONS
);
4821 vin
= gimple_omp_sections_control (sections_stmt
);
4822 if (!is_combined_parallel (region
))
4824 /* If we are not inside a combined parallel+sections region,
4825 call GOMP_sections_start. */
4826 t
= build_int_cst (unsigned_type_node
,
4827 exit_reachable
? len
- 1 : len
);
4828 u
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START
);
4829 stmt
= gimple_build_call (u
, 1, t
);
4833 /* Otherwise, call GOMP_sections_next. */
4834 u
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT
);
4835 stmt
= gimple_build_call (u
, 0);
4837 gimple_call_set_lhs (stmt
, vin
);
4838 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
4839 gsi_remove (&si
, true);
4841 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4843 switch_si
= gsi_last_bb (l0_bb
);
4844 gcc_assert (gimple_code (gsi_stmt (switch_si
)) == GIMPLE_OMP_SECTIONS_SWITCH
);
4847 cont
= last_stmt (l1_bb
);
4848 gcc_assert (gimple_code (cont
) == GIMPLE_OMP_CONTINUE
);
4849 vmain
= gimple_omp_continue_control_use (cont
);
4850 vnext
= gimple_omp_continue_control_def (cont
);
4858 t
= build_case_label (build_int_cst (unsigned_type_node
, 0), NULL
, l2
);
4859 label_vec
.quick_push (t
);
4862 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4863 for (inner
= region
->inner
, casei
= 1;
4865 inner
= inner
->next
, i
++, casei
++)
4867 basic_block s_entry_bb
, s_exit_bb
;
4869 /* Skip optional reduction region. */
4870 if (inner
->type
== GIMPLE_OMP_ATOMIC_LOAD
)
4877 s_entry_bb
= inner
->entry
;
4878 s_exit_bb
= inner
->exit
;
4880 t
= gimple_block_label (s_entry_bb
);
4881 u
= build_int_cst (unsigned_type_node
, casei
);
4882 u
= build_case_label (u
, NULL
, t
);
4883 label_vec
.quick_push (u
);
4885 si
= gsi_last_bb (s_entry_bb
);
4886 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_SECTION
);
4887 gcc_assert (i
< len
|| gimple_omp_section_last_p (gsi_stmt (si
)));
4888 gsi_remove (&si
, true);
4889 single_succ_edge (s_entry_bb
)->flags
= EDGE_FALLTHRU
;
4891 if (s_exit_bb
== NULL
)
4894 si
= gsi_last_bb (s_exit_bb
);
4895 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_RETURN
);
4896 gsi_remove (&si
, true);
4898 single_succ_edge (s_exit_bb
)->flags
= EDGE_FALLTHRU
;
4901 /* Error handling code goes in DEFAULT_BB. */
4902 t
= gimple_block_label (default_bb
);
4903 u
= build_case_label (NULL
, NULL
, t
);
4904 make_edge (l0_bb
, default_bb
, 0);
4906 stmt
= gimple_build_switch (vmain
, u
, label_vec
);
4907 gsi_insert_after (&switch_si
, stmt
, GSI_SAME_STMT
);
4908 gsi_remove (&switch_si
, true);
4909 label_vec
.release ();
4911 si
= gsi_start_bb (default_bb
);
4912 stmt
= gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP
), 0);
4913 gsi_insert_after (&si
, stmt
, GSI_CONTINUE_LINKING
);
4919 /* Code to get the next section goes in L1_BB. */
4920 si
= gsi_last_bb (l1_bb
);
4921 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_CONTINUE
);
4923 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT
);
4924 stmt
= gimple_build_call (bfn_decl
, 0);
4925 gimple_call_set_lhs (stmt
, vnext
);
4926 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
4927 gsi_remove (&si
, true);
4929 single_succ_edge (l1_bb
)->flags
= EDGE_FALLTHRU
;
4932 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4933 si
= gsi_last_bb (l2_bb
);
4934 if (gimple_omp_return_nowait_p (gsi_stmt (si
)))
4935 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT
);
4937 t
= builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END
);
4938 stmt
= gimple_build_call (t
, 0);
4939 gsi_insert_after (&si
, stmt
, GSI_SAME_STMT
);
4940 gsi_remove (&si
, true);
4942 set_immediate_dominator (CDI_DOMINATORS
, default_bb
, l0_bb
);
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */
static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */
static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  */
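/* For example (hypothetical source), a directive such as

	#pragma omp atomic read
	  v = x;

   is expanded here into a call to the sized __atomic_load_N builtin with
   the relaxed memory model, provided the target supplies that builtin.  */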
5013 expand_omp_atomic_load (basic_block load_bb
, tree addr
,
5014 tree loaded_val
, int index
)
5016 enum built_in_function tmpbase
;
5017 gimple_stmt_iterator gsi
;
5018 basic_block store_bb
;
5021 tree decl
, call
, type
, itype
;
5023 gsi
= gsi_last_bb (load_bb
);
5024 stmt
= gsi_stmt (gsi
);
5025 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_LOAD
);
5026 loc
= gimple_location (stmt
);
5028 /* ??? If the target does not implement atomic_load_optab[mode], and mode
5029 is smaller than word size, then expand_atomic_load assumes that the load
5030 is atomic. We could avoid the builtin entirely in this case. */
5032 tmpbase
= (enum built_in_function
) (BUILT_IN_ATOMIC_LOAD_N
+ index
+ 1);
5033 decl
= builtin_decl_explicit (tmpbase
);
5034 if (decl
== NULL_TREE
)
5037 type
= TREE_TYPE (loaded_val
);
5038 itype
= TREE_TYPE (TREE_TYPE (decl
));
5040 call
= build_call_expr_loc (loc
, decl
, 2, addr
,
5041 build_int_cst (NULL
, MEMMODEL_RELAXED
));
5042 if (!useless_type_conversion_p (type
, itype
))
5043 call
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, type
, call
);
5044 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, loaded_val
, call
);
5046 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
5047 gsi_remove (&gsi
, true);
5049 store_bb
= single_succ (load_bb
);
5050 gsi
= gsi_last_bb (store_bb
);
5051 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
5052 gsi_remove (&gsi
, true);
5054 if (gimple_in_ssa_p (cfun
))
5055 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  */
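/* For example (hypothetical source),

	#pragma omp atomic write
	  x = expr;

   maps onto the sized __atomic_store_N builtin; if the old value of x is
   also needed (an atomic capture), an __atomic_exchange_N call is emitted
   instead.  */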
5064 expand_omp_atomic_store (basic_block load_bb
, tree addr
,
5065 tree loaded_val
, tree stored_val
, int index
)
5067 enum built_in_function tmpbase
;
5068 gimple_stmt_iterator gsi
;
5069 basic_block store_bb
= single_succ (load_bb
);
5072 tree decl
, call
, type
, itype
;
5073 enum machine_mode imode
;
5076 gsi
= gsi_last_bb (load_bb
);
5077 stmt
= gsi_stmt (gsi
);
5078 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_LOAD
);
5080 /* If the load value is needed, then this isn't a store but an exchange. */
5081 exchange
= gimple_omp_atomic_need_value_p (stmt
);
5083 gsi
= gsi_last_bb (store_bb
);
5084 stmt
= gsi_stmt (gsi
);
5085 gcc_assert (gimple_code (stmt
) == GIMPLE_OMP_ATOMIC_STORE
);
5086 loc
= gimple_location (stmt
);
5088 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5089 is smaller than word size, then expand_atomic_store assumes that the store
5090 is atomic. We could avoid the builtin entirely in this case. */
5092 tmpbase
= (exchange
? BUILT_IN_ATOMIC_EXCHANGE_N
: BUILT_IN_ATOMIC_STORE_N
);
5093 tmpbase
= (enum built_in_function
) ((int) tmpbase
+ index
+ 1);
5094 decl
= builtin_decl_explicit (tmpbase
);
5095 if (decl
== NULL_TREE
)
5098 type
= TREE_TYPE (stored_val
);
5100 /* Dig out the type of the function's second argument. */
5101 itype
= TREE_TYPE (decl
);
5102 itype
= TYPE_ARG_TYPES (itype
);
5103 itype
= TREE_CHAIN (itype
);
5104 itype
= TREE_VALUE (itype
);
5105 imode
= TYPE_MODE (itype
);
5107 if (exchange
&& !can_atomic_exchange_p (imode
, true))
5110 if (!useless_type_conversion_p (itype
, type
))
5111 stored_val
= fold_build1_loc (loc
, VIEW_CONVERT_EXPR
, itype
, stored_val
);
5112 call
= build_call_expr_loc (loc
, decl
, 3, addr
, stored_val
,
5113 build_int_cst (NULL
, MEMMODEL_RELAXED
));
5116 if (!useless_type_conversion_p (type
, itype
))
5117 call
= build1_loc (loc
, VIEW_CONVERT_EXPR
, type
, call
);
5118 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, loaded_val
, call
);
5121 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
5122 gsi_remove (&gsi
, true);
5124 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5125 gsi
= gsi_last_bb (load_bb
);
5126 gsi_remove (&gsi
, true);
5128 if (gimple_in_ssa_p (cfun
))
5129 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */
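/* For example (hypothetical source),

	#pragma omp atomic
	  x += 1;

   on a 4-byte integer becomes a single call to __atomic_fetch_add_4 on
   x's address with the relaxed memory model; the analogous sub/and/or/xor
   builtins are used for the other supported operators.  */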
5140 expand_omp_atomic_fetch_op (basic_block load_bb
,
5141 tree addr
, tree loaded_val
,
5142 tree stored_val
, int index
)
5144 enum built_in_function oldbase
, newbase
, tmpbase
;
5145 tree decl
, itype
, call
;
5147 basic_block store_bb
= single_succ (load_bb
);
5148 gimple_stmt_iterator gsi
;
5151 enum tree_code code
;
5152 bool need_old
, need_new
;
5153 enum machine_mode imode
;
5155 /* We expect to find the following sequences:
5158 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5161 val = tmp OP something; (or: something OP tmp)
5162 GIMPLE_OMP_STORE (val)
5164 ???FIXME: Allow a more flexible sequence.
5165 Perhaps use data flow to pick the statements.
5169 gsi
= gsi_after_labels (store_bb
);
5170 stmt
= gsi_stmt (gsi
);
5171 loc
= gimple_location (stmt
);
5172 if (!is_gimple_assign (stmt
))
5175 if (gimple_code (gsi_stmt (gsi
)) != GIMPLE_OMP_ATOMIC_STORE
)
5177 need_new
= gimple_omp_atomic_need_value_p (gsi_stmt (gsi
));
5178 need_old
= gimple_omp_atomic_need_value_p (last_stmt (load_bb
));
5179 gcc_checking_assert (!need_old
|| !need_new
);
5181 if (!operand_equal_p (gimple_assign_lhs (stmt
), stored_val
, 0))
5184 /* Check for one of the supported fetch-op operations. */
5185 code
= gimple_assign_rhs_code (stmt
);
5189 case POINTER_PLUS_EXPR
:
5190 oldbase
= BUILT_IN_ATOMIC_FETCH_ADD_N
;
5191 newbase
= BUILT_IN_ATOMIC_ADD_FETCH_N
;
5194 oldbase
= BUILT_IN_ATOMIC_FETCH_SUB_N
;
5195 newbase
= BUILT_IN_ATOMIC_SUB_FETCH_N
;
5198 oldbase
= BUILT_IN_ATOMIC_FETCH_AND_N
;
5199 newbase
= BUILT_IN_ATOMIC_AND_FETCH_N
;
5202 oldbase
= BUILT_IN_ATOMIC_FETCH_OR_N
;
5203 newbase
= BUILT_IN_ATOMIC_OR_FETCH_N
;
5206 oldbase
= BUILT_IN_ATOMIC_FETCH_XOR_N
;
5207 newbase
= BUILT_IN_ATOMIC_XOR_FETCH_N
;
5213 /* Make sure the expression is of the proper form. */
5214 if (operand_equal_p (gimple_assign_rhs1 (stmt
), loaded_val
, 0))
5215 rhs
= gimple_assign_rhs2 (stmt
);
5216 else if (commutative_tree_code (gimple_assign_rhs_code (stmt
))
5217 && operand_equal_p (gimple_assign_rhs2 (stmt
), loaded_val
, 0))
5218 rhs
= gimple_assign_rhs1 (stmt
);
5222 tmpbase
= ((enum built_in_function
)
5223 ((need_new
? newbase
: oldbase
) + index
+ 1));
5224 decl
= builtin_decl_explicit (tmpbase
);
5225 if (decl
== NULL_TREE
)
5227 itype
= TREE_TYPE (TREE_TYPE (decl
));
5228 imode
= TYPE_MODE (itype
);
5230 /* We could test all of the various optabs involved, but the fact of the
5231 matter is that (with the exception of i486 vs i586 and xadd) all targets
5232 that support any atomic operaton optab also implements compare-and-swap.
5233 Let optabs.c take care of expanding any compare-and-swap loop. */
5234 if (!can_compare_and_swap_p (imode
, true))
5237 gsi
= gsi_last_bb (load_bb
);
5238 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_LOAD
);
5240 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5241 It only requires that the operation happen atomically. Thus we can
5242 use the RELAXED memory model. */
5243 call
= build_call_expr_loc (loc
, decl
, 3, addr
,
5244 fold_convert_loc (loc
, itype
, rhs
),
5245 build_int_cst (NULL
, MEMMODEL_RELAXED
));
5247 if (need_old
|| need_new
)
5249 lhs
= need_old
? loaded_val
: stored_val
;
5250 call
= fold_convert_loc (loc
, TREE_TYPE (lhs
), call
);
5251 call
= build2_loc (loc
, MODIFY_EXPR
, void_type_node
, lhs
, call
);
5254 call
= fold_convert_loc (loc
, void_type_node
, call
);
5255 force_gimple_operand_gsi (&gsi
, call
, true, NULL_TREE
, true, GSI_SAME_STMT
);
5256 gsi_remove (&gsi
, true);
5258 gsi
= gsi_last_bb (store_bb
);
5259 gcc_assert (gimple_code (gsi_stmt (gsi
)) == GIMPLE_OMP_ATOMIC_STORE
);
5260 gsi_remove (&gsi
, true);
5261 gsi
= gsi_last_bb (store_bb
);
5262 gsi_remove (&gsi
, true);
5264 if (gimple_in_ssa_p (cfun
))
5265 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	// with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */
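/* For example (hypothetical source), an update such as

	#pragma omp atomic
	  d *= 2.0;	// d is a double

   has no fetch-op builtin, so it is retried in a compare-and-swap loop:
   the double is view-converted to an 8-byte integer, swapped with
   __sync_val_compare_and_swap, and the loop repeats until no other thread
   intervened.  The comparison is done on the integer image, so NaNs and
   -0.0 are handled correctly.  */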
5283 expand_omp_atomic_pipeline (basic_block load_bb
, basic_block store_bb
,
5284 tree addr
, tree loaded_val
, tree stored_val
,
5287 tree loadedi
, storedi
, initial
, new_storedi
, old_vali
;
5288 tree type
, itype
, cmpxchg
, iaddr
;
5289 gimple_stmt_iterator si
;
5290 basic_block loop_header
= single_succ (load_bb
);
5293 enum built_in_function fncode
;
5295 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5296 order to use the RELAXED memory model effectively. */
5297 fncode
= (enum built_in_function
)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5299 cmpxchg
= builtin_decl_explicit (fncode
);
5300 if (cmpxchg
== NULL_TREE
)
5302 type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
5303 itype
= TREE_TYPE (TREE_TYPE (cmpxchg
));
5305 if (!can_compare_and_swap_p (TYPE_MODE (itype
), true))
5308 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5309 si
= gsi_last_bb (load_bb
);
5310 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_LOAD
);
5312 /* For floating-point values, we'll need to view-convert them to integers
5313 so that we can perform the atomic compare and swap. Simplify the
5314 following code by always setting up the "i"ntegral variables. */
5315 if (!INTEGRAL_TYPE_P (type
) && !POINTER_TYPE_P (type
))
5319 iaddr
= create_tmp_reg (build_pointer_type_for_mode (itype
, ptr_mode
,
5322 = force_gimple_operand_gsi (&si
,
5323 fold_convert (TREE_TYPE (iaddr
), addr
),
5324 false, NULL_TREE
, true, GSI_SAME_STMT
);
5325 stmt
= gimple_build_assign (iaddr
, iaddr_val
);
5326 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5327 loadedi
= create_tmp_var (itype
, NULL
);
5328 if (gimple_in_ssa_p (cfun
))
5329 loadedi
= make_ssa_name (loadedi
, NULL
);
5334 loadedi
= loaded_val
;
5338 = force_gimple_operand_gsi (&si
,
5339 build2 (MEM_REF
, TREE_TYPE (TREE_TYPE (iaddr
)),
5341 build_int_cst (TREE_TYPE (iaddr
), 0)),
5342 true, NULL_TREE
, true, GSI_SAME_STMT
);
5344 /* Move the value to the LOADEDI temporary. */
5345 if (gimple_in_ssa_p (cfun
))
5347 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header
)));
5348 phi
= create_phi_node (loadedi
, loop_header
);
5349 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, single_succ_edge (load_bb
)),
5353 gsi_insert_before (&si
,
5354 gimple_build_assign (loadedi
, initial
),
5356 if (loadedi
!= loaded_val
)
5358 gimple_stmt_iterator gsi2
;
5361 x
= build1 (VIEW_CONVERT_EXPR
, type
, loadedi
);
5362 gsi2
= gsi_start_bb (loop_header
);
5363 if (gimple_in_ssa_p (cfun
))
5366 x
= force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
5367 true, GSI_SAME_STMT
);
5368 stmt
= gimple_build_assign (loaded_val
, x
);
5369 gsi_insert_before (&gsi2
, stmt
, GSI_SAME_STMT
);
5373 x
= build2 (MODIFY_EXPR
, TREE_TYPE (loaded_val
), loaded_val
, x
);
5374 force_gimple_operand_gsi (&gsi2
, x
, true, NULL_TREE
,
5375 true, GSI_SAME_STMT
);
5378 gsi_remove (&si
, true);
5380 si
= gsi_last_bb (store_bb
);
5381 gcc_assert (gimple_code (gsi_stmt (si
)) == GIMPLE_OMP_ATOMIC_STORE
);
5384 storedi
= stored_val
;
5387 force_gimple_operand_gsi (&si
,
5388 build1 (VIEW_CONVERT_EXPR
, itype
,
5389 stored_val
), true, NULL_TREE
, true,
5392 /* Build the compare&swap statement. */
5393 new_storedi
= build_call_expr (cmpxchg
, 3, iaddr
, loadedi
, storedi
);
5394 new_storedi
= force_gimple_operand_gsi (&si
,
5395 fold_convert (TREE_TYPE (loadedi
),
5398 true, GSI_SAME_STMT
);
5400 if (gimple_in_ssa_p (cfun
))
5404 old_vali
= create_tmp_var (TREE_TYPE (loadedi
), NULL
);
5405 stmt
= gimple_build_assign (old_vali
, loadedi
);
5406 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5408 stmt
= gimple_build_assign (loadedi
, new_storedi
);
5409 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5412 /* Note that we always perform the comparison as an integer, even for
5413 floating point. This allows the atomic operation to properly
5414 succeed even with NaNs and -0.0. */
5415 stmt
= gimple_build_cond_empty
5416 (build2 (NE_EXPR
, boolean_type_node
,
5417 new_storedi
, old_vali
));
5418 gsi_insert_before (&si
, stmt
, GSI_SAME_STMT
);
5421 e
= single_succ_edge (store_bb
);
5422 e
->flags
&= ~EDGE_FALLTHRU
;
5423 e
->flags
|= EDGE_FALSE_VALUE
;
5425 e
= make_edge (store_bb
, loop_header
, EDGE_TRUE_VALUE
);
5427 /* Copy the new value to loadedi (we already did that before the condition
5428 if we are not in SSA). */
5429 if (gimple_in_ssa_p (cfun
))
5431 phi
= gimple_seq_first_stmt (phi_nodes (loop_header
));
5432 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi
, e
), new_storedi
);
5435 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5436 gsi_remove (&si
, true);
5438 if (gimple_in_ssa_p (cfun
))
5439 update_ssa (TODO_update_ssa_no_phi
);
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

			GOMP_atomic_start ();
			*addr = rhs;
			GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.

   LOADED_VAL and ADDR are the operands of the GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace

	GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr)  with  loaded_val = *addr;

   and replace

	GIMPLE_OMP_ATOMIC_STORE (stored_val)  with  *addr = stored_val;  */
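/* For example (hypothetical source), an atomic update of a long double on
   a target without a wide enough compare-and-swap ends up bracketed by the
   libgomp global lock:

	GOMP_atomic_start ();
	ld = ld + 1.0L;
	GOMP_atomic_end ();
*/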
static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand it
   using expand_omp_atomic_fetch_op.  If that fails, we try to
   call expand_omp_atomic_pipeline, and if that fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1 ().  */
5512 expand_omp_atomic (struct omp_region
*region
)
5514 basic_block load_bb
= region
->entry
, store_bb
= region
->exit
;
5515 gimple load
= last_stmt (load_bb
), store
= last_stmt (store_bb
);
5516 tree loaded_val
= gimple_omp_atomic_load_lhs (load
);
5517 tree addr
= gimple_omp_atomic_load_rhs (load
);
5518 tree stored_val
= gimple_omp_atomic_store_val (store
);
5519 tree type
= TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr
)));
5520 HOST_WIDE_INT index
;
5522 /* Make sure the type is one of the supported sizes. */
5523 index
= tree_low_cst (TYPE_SIZE_UNIT (type
), 1);
5524 index
= exact_log2 (index
);
5525 if (index
>= 0 && index
<= 4)
5527 unsigned int align
= TYPE_ALIGN_UNIT (type
);
5529 /* __sync builtins require strict data alignment. */
5530 if (exact_log2 (align
) >= index
)
5533 if (loaded_val
== stored_val
5534 && (GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_INT
5535 || GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
)
5536 && GET_MODE_BITSIZE (TYPE_MODE (type
)) <= BITS_PER_WORD
5537 && expand_omp_atomic_load (load_bb
, addr
, loaded_val
, index
))
5541 if ((GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_INT
5542 || GET_MODE_CLASS (TYPE_MODE (type
)) == MODE_FLOAT
)
5543 && GET_MODE_BITSIZE (TYPE_MODE (type
)) <= BITS_PER_WORD
5544 && store_bb
== single_succ (load_bb
)
5545 && first_stmt (store_bb
) == store
5546 && expand_omp_atomic_store (load_bb
, addr
, loaded_val
,
5550 /* When possible, use specialized atomic update functions. */
5551 if ((INTEGRAL_TYPE_P (type
) || POINTER_TYPE_P (type
))
5552 && store_bb
== single_succ (load_bb
)
5553 && expand_omp_atomic_fetch_op (load_bb
, addr
,
5554 loaded_val
, stored_val
, index
))
5557 /* If we don't have specialized __sync builtins, try and implement
5558 as a compare and swap loop. */
5559 if (expand_omp_atomic_pipeline (load_bb
, store_bb
, addr
,
5560 loaded_val
, stored_val
, index
))
5565 /* The ultimate fallback is wrapping the operation in a mutex. */
5566 expand_omp_atomic_mutex (load_bb
, store_bb
, addr
, loaded_val
, stored_val
);
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having
   any internal dependencies in their body.  */
static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->inner)
	expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, a whole
   forest of OMP constructs may be built).  */
5645 build_omp_regions_1 (basic_block bb
, struct omp_region
*parent
,
5648 gimple_stmt_iterator gsi
;
5652 gsi
= gsi_last_bb (bb
);
5653 if (!gsi_end_p (gsi
) && is_gimple_omp (gsi_stmt (gsi
)))
5655 struct omp_region
*region
;
5656 enum gimple_code code
;
5658 stmt
= gsi_stmt (gsi
);
5659 code
= gimple_code (stmt
);
5660 if (code
== GIMPLE_OMP_RETURN
)
5662 /* STMT is the return point out of region PARENT. Mark it
5663 as the exit point and make PARENT the immediately
5664 enclosing region. */
5665 gcc_assert (parent
);
5668 parent
= parent
->outer
;
5670 else if (code
== GIMPLE_OMP_ATOMIC_STORE
)
5672 /* GIMPLE_OMP_ATOMIC_STORE is analoguous to
5673 GIMPLE_OMP_RETURN, but matches with
5674 GIMPLE_OMP_ATOMIC_LOAD. */
5675 gcc_assert (parent
);
5676 gcc_assert (parent
->type
== GIMPLE_OMP_ATOMIC_LOAD
);
5679 parent
= parent
->outer
;
5682 else if (code
== GIMPLE_OMP_CONTINUE
)
5684 gcc_assert (parent
);
5687 else if (code
== GIMPLE_OMP_SECTIONS_SWITCH
)
5689 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5690 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5695 /* Otherwise, this directive becomes the parent for a new
5697 region
= new_omp_region (bb
, code
, parent
);
5702 if (single_tree
&& !parent
)
5705 for (son
= first_dom_son (CDI_DOMINATORS
, bb
);
5707 son
= next_dom_son (CDI_DOMINATORS
, son
))
5708 build_omp_regions_1 (son
, parent
, single_tree
);
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}
/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();
  return 0;
}
/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}

struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */
5814 lower_omp_sections (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
5816 tree block
, control
;
5817 gimple_stmt_iterator tgsi
;
5818 gimple stmt
, new_stmt
, bind
, t
;
5819 gimple_seq ilist
, dlist
, olist
, new_body
;
5820 struct gimplify_ctx gctx
;
5822 stmt
= gsi_stmt (*gsi_p
);
5824 push_gimplify_context (&gctx
);
5828 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt
),
5829 &ilist
, &dlist
, ctx
);
5831 new_body
= gimple_omp_body (stmt
);
5832 gimple_omp_set_body (stmt
, NULL
);
5833 tgsi
= gsi_start (new_body
);
5834 for (; !gsi_end_p (tgsi
); gsi_next (&tgsi
))
5839 sec_start
= gsi_stmt (tgsi
);
5840 sctx
= maybe_lookup_ctx (sec_start
);
5843 lower_omp (gimple_omp_body_ptr (sec_start
), sctx
);
5844 gsi_insert_seq_after (&tgsi
, gimple_omp_body (sec_start
),
5845 GSI_CONTINUE_LINKING
);
5846 gimple_omp_set_body (sec_start
, NULL
);
5848 if (gsi_one_before_end_p (tgsi
))
5850 gimple_seq l
= NULL
;
5851 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt
), NULL
,
5853 gsi_insert_seq_after (&tgsi
, l
, GSI_CONTINUE_LINKING
);
5854 gimple_omp_section_set_last (sec_start
);
5857 gsi_insert_after (&tgsi
, gimple_build_omp_return (false),
5858 GSI_CONTINUE_LINKING
);
5861 block
= make_node (BLOCK
);
5862 bind
= gimple_build_bind (NULL
, new_body
, block
);
5865 lower_reduction_clauses (gimple_omp_sections_clauses (stmt
), &olist
, ctx
);
5867 block
= make_node (BLOCK
);
5868 new_stmt
= gimple_build_bind (NULL
, NULL
, block
);
5869 gsi_replace (gsi_p
, new_stmt
, true);
5871 pop_gimplify_context (new_stmt
);
5872 gimple_bind_append_vars (new_stmt
, ctx
->block_vars
);
5873 BLOCK_VARS (block
) = gimple_bind_vars (bind
);
5874 if (BLOCK_VARS (block
))
5875 TREE_USED (block
) = 1;
5878 gimple_seq_add_seq (&new_body
, ilist
);
5879 gimple_seq_add_stmt (&new_body
, stmt
);
5880 gimple_seq_add_stmt (&new_body
, gimple_build_omp_sections_switch ());
5881 gimple_seq_add_stmt (&new_body
, bind
);
5883 control
= create_tmp_var (unsigned_type_node
, ".section");
5884 t
= gimple_build_omp_continue (control
, control
);
5885 gimple_omp_sections_set_control (stmt
, control
);
5886 gimple_seq_add_stmt (&new_body
, t
);
5888 gimple_seq_add_seq (&new_body
, olist
);
5889 gimple_seq_add_seq (&new_body
, dlist
);
5891 new_body
= maybe_catch_exception (new_body
);
5893 t
= gimple_build_omp_return
5894 (!!find_omp_clause (gimple_omp_sections_clauses (stmt
),
5895 OMP_CLAUSE_NOWAIT
));
5896 gimple_seq_add_stmt (&new_body
, t
);
5898 gimple_bind_set_body (new_stmt
, new_body
);
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   to a synchronization analysis pass.  */
5914 lower_omp_single_simple (gimple single_stmt
, gimple_seq
*pre_p
)
5916 location_t loc
= gimple_location (single_stmt
);
5917 tree tlabel
= create_artificial_label (loc
);
5918 tree flabel
= create_artificial_label (loc
);
5922 decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START
);
5923 lhs
= create_tmp_var (TREE_TYPE (TREE_TYPE (decl
)), NULL
);
5924 call
= gimple_build_call (decl
, 0);
5925 gimple_call_set_lhs (call
, lhs
);
5926 gimple_seq_add_stmt (pre_p
, call
);
5928 cond
= gimple_build_cond (EQ_EXPR
, lhs
,
5929 fold_convert_loc (loc
, TREE_TYPE (lhs
),
5932 gimple_seq_add_stmt (pre_p
, cond
);
5933 gimple_seq_add_stmt (pre_p
, gimple_build_label (tlabel
));
5934 gimple_seq_add_seq (pre_p
, gimple_omp_body (single_stmt
));
5935 gimple_seq_add_stmt (pre_p
, gimple_build_label (flabel
));
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   to a synchronization analysis pass.  */
5969 lower_omp_single_copy (gimple single_stmt
, gimple_seq
*pre_p
, omp_context
*ctx
)
5971 tree ptr_type
, t
, l0
, l1
, l2
, bfn_decl
;
5972 gimple_seq copyin_seq
;
5973 location_t loc
= gimple_location (single_stmt
);
5975 ctx
->sender_decl
= create_tmp_var (ctx
->record_type
, ".omp_copy_o");
5977 ptr_type
= build_pointer_type (ctx
->record_type
);
5978 ctx
->receiver_decl
= create_tmp_var (ptr_type
, ".omp_copy_i");
5980 l0
= create_artificial_label (loc
);
5981 l1
= create_artificial_label (loc
);
5982 l2
= create_artificial_label (loc
);
5984 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START
);
5985 t
= build_call_expr_loc (loc
, bfn_decl
, 0);
5986 t
= fold_convert_loc (loc
, ptr_type
, t
);
5987 gimplify_assign (ctx
->receiver_decl
, t
, pre_p
);
5989 t
= build2 (EQ_EXPR
, boolean_type_node
, ctx
->receiver_decl
,
5990 build_int_cst (ptr_type
, 0));
5991 t
= build3 (COND_EXPR
, void_type_node
, t
,
5992 build_and_jump (&l0
), build_and_jump (&l1
));
5993 gimplify_and_add (t
, pre_p
);
5995 gimple_seq_add_stmt (pre_p
, gimple_build_label (l0
));
5997 gimple_seq_add_seq (pre_p
, gimple_omp_body (single_stmt
));
6000 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt
), pre_p
,
6003 t
= build_fold_addr_expr_loc (loc
, ctx
->sender_decl
);
6004 bfn_decl
= builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END
);
6005 t
= build_call_expr_loc (loc
, bfn_decl
, 1, t
);
6006 gimplify_and_add (t
, pre_p
);
6008 t
= build_and_jump (&l2
);
6009 gimplify_and_add (t
, pre_p
);
6011 gimple_seq_add_stmt (pre_p
, gimple_build_label (l1
));
6013 gimple_seq_add_seq (pre_p
, copyin_seq
);
6015 gimple_seq_add_stmt (pre_p
, gimple_build_label (l2
));
/* Expand code for an OpenMP single directive.  */
6022 lower_omp_single (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
6025 gimple t
, bind
, single_stmt
= gsi_stmt (*gsi_p
);
6026 gimple_seq bind_body
, dlist
;
6027 struct gimplify_ctx gctx
;
6029 push_gimplify_context (&gctx
);
6031 block
= make_node (BLOCK
);
6032 bind
= gimple_build_bind (NULL
, NULL
, block
);
6033 gsi_replace (gsi_p
, bind
, true);
6036 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt
),
6037 &bind_body
, &dlist
, ctx
);
6038 lower_omp (gimple_omp_body_ptr (single_stmt
), ctx
);
6040 gimple_seq_add_stmt (&bind_body
, single_stmt
);
6042 if (ctx
->record_type
)
6043 lower_omp_single_copy (single_stmt
, &bind_body
, ctx
);
6045 lower_omp_single_simple (single_stmt
, &bind_body
);
6047 gimple_omp_set_body (single_stmt
, NULL
);
6049 gimple_seq_add_seq (&bind_body
, dlist
);
6051 bind_body
= maybe_catch_exception (bind_body
);
6053 t
= gimple_build_omp_return
6054 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt
),
6055 OMP_CLAUSE_NOWAIT
));
6056 gimple_seq_add_stmt (&bind_body
, t
);
6057 gimple_bind_set_body (bind
, bind_body
);
6059 pop_gimplify_context (bind
);
6061 gimple_bind_append_vars (bind
, ctx
->block_vars
);
6062 BLOCK_VARS (block
) = ctx
->block_vars
;
6063 if (BLOCK_VARS (block
))
6064 TREE_USED (block
) = 1;
/* Expand code for an OpenMP master directive.  */
6071 lower_omp_master (gimple_stmt_iterator
*gsi_p
, omp_context
*ctx
)
6073 tree block
, lab
= NULL
, x
, bfn_decl
;
6074 gimple stmt
= gsi_stmt (*gsi_p
), bind
;
6075 location_t loc
= gimple_location (stmt
);
6077 struct gimplify_ctx gctx
;
6079 push_gimplify_context (&gctx
);
6081 block
= make_node (BLOCK
);
6082 bind
= gimple_build_bind (NULL
, NULL
, block
);
6083 gsi_replace (gsi_p
, bind
, true);
6084 gimple_bind_add_stmt (bind
, stmt
);
6086 bfn_decl
= builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM
);
6087 x
= build_call_expr_loc (loc
, bfn_decl
, 0);
6088 x
= build2 (EQ_EXPR
, boolean_type_node
, x
, integer_zero_node
);
6089 x
= build3 (COND_EXPR
, void_type_node
, x
, NULL
, build_and_jump (&lab
));
6091 gimplify_and_add (x
, &tseq
);
6092 gimple_bind_add_seq (bind
, tseq
);
6094 lower_omp (gimple_omp_body_ptr (stmt
), ctx
);
6095 gimple_omp_set_body (stmt
, maybe_catch_exception (gimple_omp_body (stmt
)));
6096 gimple_bind_add_seq (bind
, gimple_omp_body (stmt
));
6097 gimple_omp_set_body (stmt
, NULL
);
6099 gimple_bind_add_stmt (bind
, gimple_build_label (lab
));
6101 gimple_bind_add_stmt (bind
, gimple_build_omp_return (true));
6103 pop_gimplify_context (bind
);
6105 gimple_bind_append_vars (bind
, ctx
->block_vars
);
6106 BLOCK_VARS (block
) = ctx
->block_vars
;
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
			 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}

/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case, it
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */
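
/* For illustration, "#pragma omp critical" is lowered roughly to

       GOMP_critical_start ();
       BODY;
       GOMP_critical_end ();

   and "#pragma omp critical (NAME)" to

       GOMP_critical_name_start (&.gomp_critical_user_NAME);
       BODY;
       GOMP_critical_name_end (&.gomp_critical_user_NAME);

   where .gomp_critical_user_NAME is the common symbol created below, so
   every translation unit using the same NAME shares one lock.  */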

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
      unlock = build_call_expr_loc (loc, unlock, 1,
				    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
}

/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */
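
/* As an illustration, given

       #pragma omp for lastprivate (x)
       for (V = N1; V < N2; V += STEP)
	 BODY;

   the copy-out appended to *DLIST looks roughly like

       if (V >= N2)          -> or V == N2 when STEP is +1 or -1
	 x = <x's private copy>;

   so only the thread whose iterator reached the overall end value runs
   the lastprivate assignments, and V is pre-initialized below so that
   threads which execute no iterations can never satisfy the test.  */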

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->loop.n2, 0)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}

/* Lower code for an OpenMP loop directive.  */
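
/* For illustration, the sequence assembled below for

       #pragma omp for
       for (V = N1; V < N2; V += STEP)
	 BODY;

   looks roughly like

       <pre-body, firstprivate/private setup>
       GIMPLE_OMP_FOR (V = N1; V < N2; V += STEP)
	 BODY;
       GIMPLE_OMP_CONTINUE (V, V)
       <lastprivate/reduction copy-out>
       GIMPLE_OMP_RETURN [nowait?]

   The division of the iteration space among threads is not decided here;
   that happens later, in pass_expand_omp.  */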

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  lower_omp (gimple_omp_body_ptr (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Replace at gsi right away, so that 'stmt' is no member
     of a sequence anymore as we're going to add to a different
     one below.  */
  gsi_replace (gsi_p, new_stmt, true);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

     	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
}

/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
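
/* As an illustration, this walk is what lets

       #pragma omp parallel
       #pragma omp for
       for (...) ...

   be treated as a combined parallel loop: *INFO ends up as 1 only when
   the parallel body contains exactly one workshare construct and nothing
   else, and lower_omp_taskreg then sets the combined_p flag so expansion
   can use the combined libgomp entry points.  */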

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
			 bool *handled_ops_p,
			 struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);
  else
    return var;
}

static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}

/* Create task copyfn.  */
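
/* As an illustration, the copy function assembled below consists of three
   clause-driven passes over the task's clauses, producing roughly

       <temporaries for VLA sizes and field offsets>
       dst->field = src->field;             -> shared pointers, plain copies
       dst->field = copy-ctor (src->field); -> firstprivates needing it
       <VLA firstprivate payloads copied, pointer fields re-pointed>
       return;

   where "src" and "dst" are informal names here for the two pointer
   arguments of the generated function.  */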

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  push_cfun (child_cfun);

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = omp_build_component_ref (src, sf);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = omp_build_component_ref (dst, f);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = omp_build_component_ref (ptr, df);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
}

/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */
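
/* For illustration, for

       #pragma omp parallel firstprivate (b)
	 BODY;

   the statement is rewritten roughly as

       .omp_data_o.b = b;                 -> send side, built from ILIST
       GIMPLE_OMP_PARALLEL
	 .omp_data_i = &.omp_data_o;      -> receive side, in NEW_BODY
	 b.1 = .omp_data_i->b;            -> firstprivate setup
	 BODY (using b.1);
	 GIMPLE_OMP_RETURN
       <OLIST copy-back/cleanup>

   Outlining the body into the child function and emitting the
   GOMP_parallel_start/GOMP_parallel_end calls happen later, in
   pass_expand_omp.  */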

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (&par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_seq (bind, ilist);
  gimple_bind_add_stmt (bind, stmt);
  gimple_bind_add_seq (bind, olist);

  pop_gimplify_context (NULL);
}

/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler_ptr (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval_ptr (stmt), ctx);
      lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
      break;
    case GIMPLE_TRANSACTION:
      lower_omp (gimple_transaction_body_ptr (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body_ptr (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (&body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (&body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */
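
/* For example, the jump below is rejected with
   "invalid entry to OpenMP structured block":

       goto l;
       #pragma omp parallel
	 {
       l:  ;
	 }

   while the symmetric jump out of the block is reported with the vaguer
   "invalid branch to/from an OpenMP structured block" message built in
   diagnose_sb_0 below.  */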

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;


  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
			   diagnose_sb_2, NULL, wi);
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
	{
	  tree lab = gimple_cond_true_label (stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple) n->value : NULL);
	    }
	  lab = gimple_cond_false_label (stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple) n->value : NULL);
	    }
	}
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);

  gimple_set_body (current_function_decl, body);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  OPTGROUP_NONE,			/* optinfo_flags */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"