/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"

/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

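/* Editorial sketch (not the exact generated code):  a directive such as

       #pragma omp parallel shared (n)
         body;

   is conceptually outlined into a child function taking a pointer to a
   .omp_data_s record carrying N, while the directive itself becomes
   runtime calls along the lines of

       GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   where foo._omp_fn.0 is an illustrative name for the outlined body.  */
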
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;
  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

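/* Usage note (illustrative):  given the clause list of
   "#pragma omp for schedule(dynamic) ordered",
   find_omp_clause (clauses, OMP_CLAUSE_ORDERED) returns the ordered
   clause node, while asking for OMP_CLAUSE_COLLAPSE yields NULL_TREE.  */
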
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered
	  || fd->collapse > 1)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
				POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
				loop->n2, size_one_node);
	  else
	    loop->n2 = fold_build2_loc (loc,
				PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2_loc (loc,
				POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
				loop->n2, size_int (-1));
	  else
	    loop->n2 = fold_build2_loc (loc,
				MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
				NEGATE_EXPR, TREE_TYPE (loop->step),
				loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				 PLUS_EXPR, TREE_TYPE (loop->v),
				 loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
				    PLUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
				    MINUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
			       PLUS_EXPR, itype,
			       fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
			       fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
					      fold_convert_loc (loc, itype,
							    loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				 fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
				     MULT_EXPR, long_long_unsigned_type_node,
				     count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else
	    count = NULL_TREE;
	}
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}

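/* Worked example of the normalization above (editorial):  the loop
   header "for (i = 0; i <= N; i++)" is recorded as n1 = 0, n2 = N + 1,
   step = 1 and cond_code = LT_EXPR, so later expansion only ever has to
   deal with < and > comparisons.  */
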
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call, and the parallel and workshare regions will be
   expanded separately.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  VEC_quick_push (tree, ws_args, t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}

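/* Editorial note (assumption, not verified here):  the n1/n2/step values
   and optional chunk size pushed above are expected to line up with the
   extra trailing arguments of the combined GOMP_parallel_loop_*_start
   libgomp entry points, and the section count with
   GOMP_parallel_sections_start; see omp-builtins.def for the
   authoritative signatures.  */
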
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		return true;
	    }
	}

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	{
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

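/* Informal summary of the checks above:  aggregates, statics/externals,
   VALUE_EXPR decls, address-taken variables, variables shared again by
   an enclosing parallel, and non-readonly variables in task contexts
   are all passed by reference; only the remaining "safe" scalars get
   copy-in/copy-out semantics.  */
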
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}

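/* Editorial note on MASK (inferred from the uses in this function and
   its callers):  bit 0 installs the field in RECORD_TYPE and FIELD_MAP
   (the block the child receives), bit 1 in SRECORD_TYPE and SFIELD_MAP
   (the sender-side block used by task firstprivate handling); a MASK of
   3 therefore installs it in both.  */
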
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || is_reference (decl))
	    {
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	}
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
			       task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
		     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}

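/* Resulting signatures (informal summary):  the outlined body becomes
   "void foo._omp_fn.N (void *.omp_data_i)", while a task copy function
   additionally receives ".omp_data_o", the source block for the
   firstprivate copies (the function name shown is illustrative).  */
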
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

1665 scan_omp_task (gimple_stmt_iterator
*gsi
, omp_context
*outer_ctx
)
1669 gimple stmt
= gsi_stmt (*gsi
);
1670 location_t loc
= gimple_location (stmt
);
1672 /* Ignore task directives with empty bodies. */
1674 && empty_body_p (gimple_omp_body (stmt
)))
1676 gsi_replace (gsi
, gimple_build_nop (), false);
1680 ctx
= new_omp_context (stmt
, outer_ctx
);
1681 if (taskreg_nesting_level
> 1)
1682 ctx
->is_nested
= true;
1683 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1684 ctx
->default_kind
= OMP_CLAUSE_DEFAULT_SHARED
;
1685 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1686 name
= create_tmp_var_name (".omp_data_s");
1687 name
= build_decl (gimple_location (stmt
),
1688 TYPE_DECL
, name
, ctx
->record_type
);
1689 DECL_ARTIFICIAL (name
) = 1;
1690 DECL_NAMELESS (name
) = 1;
1691 TYPE_NAME (ctx
->record_type
) = name
;
1692 create_omp_child_function (ctx
, false);
1693 gimple_omp_task_set_child_fn (stmt
, ctx
->cb
.dst_fn
);
1695 scan_sharing_clauses (gimple_omp_task_clauses (stmt
), ctx
);
1697 if (ctx
->srecord_type
)
1699 name
= create_tmp_var_name (".omp_data_a");
1700 name
= build_decl (gimple_location (stmt
),
1701 TYPE_DECL
, name
, ctx
->srecord_type
);
1702 DECL_ARTIFICIAL (name
) = 1;
1703 DECL_NAMELESS (name
) = 1;
1704 TYPE_NAME (ctx
->srecord_type
) = name
;
1705 create_omp_child_function (ctx
, true);
1708 scan_omp (gimple_omp_body (stmt
), ctx
);
1710 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
1712 ctx
->record_type
= ctx
->receiver_decl
= NULL
;
1713 t
= build_int_cst (long_integer_type_node
, 0);
1714 gimple_omp_task_set_arg_size (stmt
, t
);
1715 t
= build_int_cst (long_integer_type_node
, 1);
1716 gimple_omp_task_set_arg_align (stmt
, t
);
1720 tree
*p
, vla_fields
= NULL_TREE
, *q
= &vla_fields
;
1721 /* Move VLA fields to the end. */
1722 p
= &TYPE_FIELDS (ctx
->record_type
);
1724 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p
))
1725 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p
))))
1728 *p
= TREE_CHAIN (*p
);
1729 TREE_CHAIN (*q
) = NULL_TREE
;
1730 q
= &TREE_CHAIN (*q
);
1733 p
= &DECL_CHAIN (*p
);
1735 layout_type (ctx
->record_type
);
1736 fixup_child_record_type (ctx
);
1737 if (ctx
->srecord_type
)
1738 layout_type (ctx
->srecord_type
);
1739 t
= fold_convert_loc (loc
, long_integer_type_node
,
1740 TYPE_SIZE_UNIT (ctx
->record_type
));
1741 gimple_omp_task_set_arg_size (stmt
, t
);
1742 t
= build_int_cst (long_integer_type_node
,
1743 TYPE_ALIGN_UNIT (ctx
->record_type
));
1744 gimple_omp_task_set_arg_align (stmt
, t
);
1749 /* Scan an OpenMP loop directive. */
1752 scan_omp_for (gimple stmt
, omp_context
*outer_ctx
)
1757 ctx
= new_omp_context (stmt
, outer_ctx
);
1759 scan_sharing_clauses (gimple_omp_for_clauses (stmt
), ctx
);
1761 scan_omp (gimple_omp_for_pre_body (stmt
), ctx
);
1762 for (i
= 0; i
< gimple_omp_for_collapse (stmt
); i
++)
1764 scan_omp_op (gimple_omp_for_index_ptr (stmt
, i
), ctx
);
1765 scan_omp_op (gimple_omp_for_initial_ptr (stmt
, i
), ctx
);
1766 scan_omp_op (gimple_omp_for_final_ptr (stmt
, i
), ctx
);
1767 scan_omp_op (gimple_omp_for_incr_ptr (stmt
, i
), ctx
);
1769 scan_omp (gimple_omp_body (stmt
), ctx
);
1772 /* Scan an OpenMP sections directive. */
1775 scan_omp_sections (gimple stmt
, omp_context
*outer_ctx
)
1779 ctx
= new_omp_context (stmt
, outer_ctx
);
1780 scan_sharing_clauses (gimple_omp_sections_clauses (stmt
), ctx
);
1781 scan_omp (gimple_omp_body (stmt
), ctx
);
1784 /* Scan an OpenMP single directive. */
1787 scan_omp_single (gimple stmt
, omp_context
*outer_ctx
)
1792 ctx
= new_omp_context (stmt
, outer_ctx
);
1793 ctx
->field_map
= splay_tree_new (splay_tree_compare_pointers
, 0, 0);
1794 ctx
->record_type
= lang_hooks
.types
.make_type (RECORD_TYPE
);
1795 name
= create_tmp_var_name (".omp_copy_s");
1796 name
= build_decl (gimple_location (stmt
),
1797 TYPE_DECL
, name
, ctx
->record_type
);
1798 TYPE_NAME (ctx
->record_type
) = name
;
1800 scan_sharing_clauses (gimple_omp_single_clauses (stmt
), ctx
);
1801 scan_omp (gimple_omp_body (stmt
), ctx
);
1803 if (TYPE_FIELDS (ctx
->record_type
) == NULL
)
1804 ctx
->record_type
= NULL
;
1806 layout_type (ctx
->record_type
);
/* Check OpenMP nesting restrictions.  */

static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
	      {
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
		return;
	      }
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
			"task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	    return;
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	    return;
	  case GIMPLE_OMP_PARALLEL:
	    return;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	  {
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
	    return;
	  }
      break;
    default:
      break;
    }
}

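/* Example of source diagnosed above (editorial):

       #pragma omp master
       #pragma omp single

   draws "work-sharing region may not be closely nested inside of
   work-sharing, critical, ordered, master or explicit task region".  */
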
/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	*tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = build_int_cst_wide (tem,
					      TREE_INT_CST_LOW (t),
					      TREE_INT_CST_HIGH (t));
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}

/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
	check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	    check_omp_nesting_restrictions (stmt, ctx);
	}
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (stmt); var; var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;

    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}

/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

	    ** This is a problem.  The symbol iD.1562 cannot be referenced
	       inside the body of the outer parallel region.  But since we are
	       emitting this copy operation while expanding the inner parallel
	       directive, we need to access the CTX structure of the outer
	       parallel directive to get the correct mapping:

		  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}

/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
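/* For illustration, a sketch of the values this yields for common
   reductions on an int (the floating-point min/max cases depend on
   HONOR_INFINITIES, as handled above):

	reduction(+:x)	 ->  x_priv = 0;
	reduction(*:x)	 ->  x_priv = 1;
	reduction(&:x)	 ->  x_priv = ~0;
	reduction(min:x) ->  x_priv = INT_MAX;
	reduction(max:x) ->  x_priv = INT_MIN;  */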
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
			 omp_context *ctx)
{
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;
	  location_t clause_loc = OMP_CLAUSE_LOCATION (c);

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	    case OMP_CLAUSE_REDUCTION:
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0)
		    continue;
		}
	      break;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;

	      if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
		{
		  gimple stmt;
		  tree tmp;

		  ptr = DECL_VALUE_EXPR (new_var);
		  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
		  ptr = TREE_OPERAND (ptr, 0);
		  gcc_assert (DECL_P (ptr));
		  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

		  /* void *tmp = __builtin_alloca */
		  stmt
		    = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
		  tmp = create_tmp_var_raw (ptr_type_node, NULL);
		  gimple_add_tmp_var (tmp);
		  gimple_call_set_lhs (stmt, tmp);

		  gimple_seq_add_stmt (ilist, stmt);

		  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
		  gimplify_assign (ptr, x, ilist);
		}
	    }
	  else if (is_reference (var))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  Note that this doesn't apply to
		 C++, since reference types are disallowed in data
		 sharing clauses there, except for NRV optimized
		 return values.  */
	      if (pass == 0)
		continue;

	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
		{
		  x = build_receiver_ref (var, false, ctx);
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else if (TREE_CONSTANT (x))
		{
		  const char *name = NULL;
		  if (DECL_NAME (var))
		    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

		  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					  name);
		  gimple_add_tmp_var (x);
		  TREE_ADDRESSABLE (x) = 1;
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else
		x = build_call_expr_loc (clause_loc,
					 built_in_decls[BUILT_IN_ALLOCA], 1, x);

	      x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
	      gimplify_assign (new_var, x, ilist);

	      new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	    }
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;

	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, ctx);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;

	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		break;
	      /* FALLTHRU */

	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
		x = build_outer_var_ref (var, ctx);
	      else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
		{
		  if (is_task_ctx (ctx))
		    x = build_receiver_ref (var, false, ctx);
		  else
		    x = build_outer_var_ref (var, ctx);
		}
	      else
		x = NULL;
	      x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
	      if (x)
		gimplify_and_add (x, ilist);
	      /* FALLTHRU */

	    do_dtor:
	      x = lang_hooks.decls.omp_clause_dtor (c, new_var);
	      if (x)
		{
		  gimple_seq tseq = NULL;

		  dtor = x;
		  gimplify_stmt (&dtor, &tseq);
		  gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
		}
	      break;

	    case OMP_CLAUSE_FIRSTPRIVATE:
	      if (is_task_ctx (ctx))
		{
		  if (is_reference (var) || is_variable_sized (var))
		    goto do_dtor;
		  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
									  ctx))
			   || use_pointer_for_field (var, NULL))
		    {
		      x = build_receiver_ref (var, false, ctx);
		      SET_DECL_VALUE_EXPR (new_var, x);
		      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
		      goto do_dtor;
		    }
		}
	      x = build_outer_var_ref (var, ctx);
	      x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
	      gimplify_and_add (x, ilist);
	      goto do_dtor;

	    case OMP_CLAUSE_COPYIN:
	      by_ref = use_pointer_for_field (var, NULL);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
	      append_to_statement_list (x, &copyin_seq);
	      copyin_by_ref |= by_ref;
	      break;

	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
		{
		  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
		  x = build_outer_var_ref (var, ctx);

		  if (is_reference (var))
		    x = build_fold_addr_expr_loc (clause_loc, x);
		  SET_DECL_VALUE_EXPR (placeholder, x);
		  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
		  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
		  gimple_seq_add_seq (ilist,
				      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
		  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
		  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
		}
	      else
		{
		  x = omp_reduction_init (c, TREE_TYPE (new_var));
		  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
		  gimplify_assign (new_var, x, ilist);
		}
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
		  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
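/* A sketch of the receiver-side code the above emits for, e.g.,

	#pragma omp parallel firstprivate(a) copyin(t)

   (variable names are illustrative, not taken from a real dump):

	a = .omp_data_i->a;			<- copy-ctor into ILIST
	if (__builtin_omp_get_thread_num () != 0)
	  t = .omp_data_i->t;			<- guarded copyin sequence
	GOMP_barrier ();			<- only when a copyin is by
						   reference or a clause is
						   both first/lastprivate  */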
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
			   omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
      par_clauses = true;
    }

  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
				label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
	{
	  var = OMP_CLAUSE_DECL (c);
	  new_var = lookup_decl (var, ctx);

	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    {
	      lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (stmt_list,
				  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	    }
	  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;

	  x = build_outer_var_ref (var, ctx);
	  if (is_reference (var))
	    new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	  x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
	  gimplify_and_add (x, stmt_list);
	}
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
	{
	  /* If this was a workshare clause, see if it had been combined
	     with its parallel.  In that case, continue looking for the
	     clauses also on the parallel statement itself.  */
	  if (is_parallel_ctx (ctx))
	    break;

	  ctx = ctx->outer;
	  if (ctx == NULL || !is_parallel_ctx (ctx))
	    break;

	  c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
			       OMP_CLAUSE_LASTPRIVATE);
	  par_clauses = true;
	}
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
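/* E.g. for "#pragma omp for lastprivate(x)", PREDICATE compares the
   iteration variable against the final iteration, so the sequence built
   above is roughly (illustrative only):

	if (D.iter == D.last) goto lab_true; else goto lab;
	lab_true:
	  x = x_priv;
	lab:  */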
/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    /* Never use OMP_ATOMIC for array reductions.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
	continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
	new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_seqp);
	  return;
	}

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var))
	    ref = build_fold_addr_expr_loc (clause_loc, ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  gimplify_assign (ref, x, &sub_seq);
	}
    }

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
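/* Illustrative output for two scalar reductions, where the single-clause
   OMP_ATOMIC shortcut above does not apply:

	GOMP_atomic_start ();
	s = s + s_priv;
	p = p * p_priv;
	GOMP_atomic_end ();

   With a single "reduction(+:s)" clause the merge is instead emitted as
   one atomic update of the outer variable.  */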
/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
	{
	  x = build_fold_addr_expr_loc (clause_loc, new_var);
	  x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
	}
      gimplify_assign (ref, x, slist);

      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
	{
	  ref = fold_convert_loc (clause_loc,
				  build_pointer_type (TREE_TYPE (new_var)),
				  ref);
	  ref = build_fold_indirect_ref_loc (clause_loc, ref);
	}
      if (is_reference (var))
	{
	  ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
	  ref = build_simple_mem_ref_loc (clause_loc, ref);
	  new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    break;
	  continue;
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    {
	      do_out = true;
	      if (lang_hooks.decls.omp_private_outer_ref (val))
		do_in = true;
	    }
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
	  gimplify_assign (ref, x, ilist);
	  if (is_task_ctx (ctx))
	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  gimplify_assign (var, ref, olist);
	}
    }
}
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive, find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
	{
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	}
      else
	{
	  x = build_sender_ref (ovar, ctx);
	  gimplify_assign (x, var, ilist);

	  if (!TREE_READONLY (var)
	      /* We don't need to receive a new reference to a result
	         or parm decl.  In fact we may not store to it as we will
		 invalidate any pending RSO and generate wrong gimple
		 during inlining.  */
	      && !((TREE_CODE (var) == RESULT_DECL
		    || TREE_CODE (var) == PARM_DECL)
		   && DECL_BY_REFERENCE (var)))
	    {
	      x = build_sender_ref (ovar, ctx);
	      gimplify_assign (var, x, olist);
	    }
	}
    }
}
/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gimple entry_stmt, VEC(tree,gc) *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  int start_ix;
  location_t clause_loc;
  VEC(tree,gc) *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
		     + (region->inner->sched_kind
			== OMP_CLAUSE_SCHEDULE_RUNTIME
			? 3 : region->inner->sched_kind);
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      gimple_stmt_iterator gsi;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var, NULL);
	      tmp_else = make_ssa_name (tmp_var, NULL);
	      tmp_join = make_ssa_name (tmp_var, NULL);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block (bb, NULL);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  stmt = gimple_build_assign (tmp_then, val);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (else_bb);
	  stmt = gimple_build_assign
	    	   (tmp_else, build_int_cst (unsigned_type_node, 1));
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      gimple phi = create_phi_node (tmp_join, bb);
	      SSA_NAME_DEF_STMT (tmp_join) = phi;
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
  VEC_quick_push (tree, args, t2);
  VEC_quick_push (tree, args, t1);
  VEC_quick_push (tree, args, val);
  VEC_splice (tree, args, ws_args);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       built_in_decls[start_ix], args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr_loc (gimple_location (entry_stmt),
			   gimple_omp_parallel_child_fn (entry_stmt), 1, t);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  t = build_call_expr_loc (gimple_location (entry_stmt),
			   built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
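/* Schematically, the sequence emitted above is (names illustrative):

	GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
	child_fn (&.omp_data_o);
	GOMP_parallel_end ();

   with num_threads 0 (runtime-selected) unless an if/num_threads clause
   computed another value, and with the _start entry point replaced by a
   combined parallel+workshare variant when possible.  */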
/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
		       gimple_omp_task_arg_size (entry_stmt),
		       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}
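/* Schematically, this emits (names illustrative):

	GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
		   if_cond, flags);

   where flags has bit 0 set for an "untied" clause and if_cond defaults
   to true when no "if" clause is present.  */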
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = built_in_decls[BUILT_IN_TRAP];

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}
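/* The wrapping is equivalent to the C++-ish pseudocode (for
   illustration only):

	try { BODY }
	catch (...) { <eh_protect_cleanup_actions or __builtin_trap> (); }

   so a throw can never escape the structured block.  */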
/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (VEC(tree,gc) *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}
/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      gimple parallel_stmt = last_stmt (region->entry);
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}
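/* E.g., in

	#pragma omp parallel
	{
	  #pragma omp for		<- implicit barrier at the end
	  for (...) ...
	}				<- implicit barrier at the end

   the workshare's trailing barrier is redundant with the parallel's own
   and, absent the task/addressable-variable concerns above, is rewritten
   into a nowait return.  */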
static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.  */

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_id
    = DECL_ASSEMBLER_NAME (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM]);
  tree num_thr_id
    = DECL_ASSEMBLER_NAME (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS]);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple call = gsi_stmt (gsi);
	tree decl;

	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
	    else
	      continue;

	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}
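/* E.g. inside a parallel body,

	n = omp_get_num_threads ();

   becomes

	n = __builtin_omp_get_num_threads ();

   which is declared const and can therefore be CSEd by later passes.  */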
/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  tree save_current;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  VEC(tree,gc) *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has been already instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  gimple_stmt_iterator gsi;
	  tree arg, narg;
	  gimple parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We ignore the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
		         == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
	          /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      /* If we are in ssa form, we must load the value from the default
		 definition of the argument.  That should not be defined now,
		 since the argument is not used uninitialized.  */
	      gcc_assert (gimple_default_def (cfun, arg) == NULL);
	      narg = make_ssa_name (arg, gimple_build_nop ());
	      set_default_def (arg, narg);
	      /* ?? Is setting the subcode really necessary ??  */
	      gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
	      gimple_assign_set_rhs1 (parcopy_stmt, narg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (TREE_CODE (t) == VAR_DECL
	    && TREE_STATIC (t)
	    && !DECL_EXTERNAL (t))
	  varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  push_cfun (child_cfun);
	  init_tree_ssa (child_cfun);
	  init_ssa_operands ();
	  cfun->gimple_df->in_ssa_p = true;
	  pop_cfun ();
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = VEC_length (tree, child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = VEC_index (tree, child_cfun->local_decls, srcidx);
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    VEC_replace (tree, child_cfun->local_decls, dstidx, t);
	  dstidx++;
	}
      if (dstidx != num)
	VEC_truncate (tree, child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
	= cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      save_current = current_function_decl;
      current_function_decl = child_fn;
      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB (bb)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      current_function_decl = save_current;
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  update_ssa (TODO_update_ssa_only_virtuals);
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.

    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	count3 = (adj + N32 - N31) / STEP3;
	count2 = (adj + N22 - N21) / STEP2;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

      */

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gimple stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
	      || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (istart0);
      add_referenced_var (iend0);
    }

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      /* collapsed loops need work for expansion in SSA form.  */
      gcc_assert (!gimple_in_ssa_p (cfun));
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
      for (i = 0; i < fd->collapse; i++)
	{
	  tree itype = TREE_TYPE (fd->loops[i].v);

	  if (POINTER_TYPE_P (itype))
	    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				     ? -1 : 1));
	  t = fold_build2 (PLUS_EXPR, itype,
			   fold_convert (itype, fd->loops[i].step), t);
	  t = fold_build2 (PLUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n2));
	  t = fold_build2 (MINUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n1));
	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype,
					  fold_convert (itype,
							fd->loops[i].step)));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			     fold_convert (itype, fd->loops[i].step));
	  t = fold_convert (type, t);
	  if (TREE_CODE (t) == INTEGER_CST)
	    counts[i] = t;
	  else
	    {
	      counts[i] = create_tmp_var (type, ".count");
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    true, GSI_SAME_STMT);
	      stmt = gimple_build_assign (counts[i], t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	  if (SSA_VAR_P (fd->loop.n2))
	    {
	      if (i == 0)
		t = counts[0];
	      else
		{
		  t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
		  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
						true, GSI_SAME_STMT);
		}
	      stmt = gimple_build_assign (fd->loop.n2, t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	}
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
	 GOMP_loop_foo_next.  */
      t = build_call_expr (built_in_decls[next_fn], 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      if (POINTER_TYPE_P (type)
	  && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
	{
	  /* Avoid casting pointers to integer of a different size.  */
	  tree itype
	    = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
	  t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
	  t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
	}
      else
	{
	  t1 = fold_convert (fd->iter_type, fd->loop.n2);
	  t0 = fold_convert (fd->iter_type, fd->loop.n1);
	}
      if (bias)
	{
	  t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
	  t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
	}
      if (fd->iter_type == long_integer_type_node)
	{
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (built_in_decls[start_fn], 6,
				   t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (built_in_decls[start_fn], 5,
				 t0, t1, t2, t3, t4);
	}
      else
	{
	  tree t5;
	  tree c_bool_type;

	  /* The GOMP_loop_ull_*start functions have an additional
	     boolean argument, true for < loops and false for > loops.
	     In Fortran, the C bool type can be different from
	     boolean_type_node.  */
	  c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
	  t5 = build_int_cst (c_bool_type,
			      fd->loop.cond_code == LT_EXPR ? 1 : 0);
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (built_in_decls[start_fn], 7,
				   t5, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (built_in_decls[start_fn], 6,
				 t5, t0, t1, t2, t3, t4);
	}
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
		     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
						      0), t);
  t = fold_convert (type, t);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
						      0), t);
  t = fold_convert (type, t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				   false, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    {
      tree tem = create_tmp_var (type, ".tem");

      stmt = gimple_build_assign (tem, fd->loop.v);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      for (i = fd->collapse - 1; i >= 0; i--)
	{
	  tree vtype = TREE_TYPE (fd->loops[i].v), itype;
	  itype = vtype;
	  if (POINTER_TYPE_P (vtype))
	    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
	  t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
	  t = fold_convert (itype, t);
	  t = fold_build2 (MULT_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].step));
	  if (POINTER_TYPE_P (vtype))
	    t = fold_build2 (POINTER_PLUS_EXPR, vtype,
			     fd->loops[i].n1, fold_convert (sizetype, t));
	  else
	    t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
	  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  if (i != 0)
	    {
	      t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
	 loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (stmt);
      vback = gimple_omp_continue_control_def (stmt);

      if (POINTER_TYPE_P (type))
	t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
			 fold_convert (sizetype, fd->loop.step));
      else
	t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				    true, GSI_SAME_STMT);
      stmt = gimple_build_assign (vback, t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
      stmt = gimple_build_cond_empty (t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1)
	{
	  basic_block last_bb, bb;

	  last_bb = cont_bb;
	  for (i = fd->collapse - 1; i >= 0; i--)
	    {
	      tree vtype = TREE_TYPE (fd->loops[i].v);

	      bb = create_empty_bb (last_bb);
	      gsi = gsi_start_bb (bb);

	      if (i < fd->collapse - 1)
		{
		  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
		  e->probability = REG_BR_PROB_BASE / 8;

		  t = fd->loops[i + 1].n1;
		  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
						false, GSI_CONTINUE_LINKING);
		  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		}
	      else
		collapse_bb = bb;

	      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

	      if (POINTER_TYPE_P (vtype))
		t = fold_build2 (POINTER_PLUS_EXPR, vtype, fd->loops[i].v,
				 fold_convert (sizetype, fd->loops[i].step));
	      else
		t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
				 fd->loops[i].step);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (fd->loops[i].v, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	      if (i > 0)
		{
		  t = fd->loops[i].n2;
		  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
						false, GSI_CONTINUE_LINKING);
		  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
				   fd->loops[i].v, t);
		  stmt = gimple_build_cond_empty (t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
		  e->probability = REG_BR_PROB_BASE * 7 / 8;
		}
	      else
		make_edge (bb, l1_bb, EDGE_FALLTHRU);
	      last_bb = bb;
	    }
	}

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (built_in_decls[next_fn], 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
	t = fold_build2 (NE_EXPR, boolean_type_node,
			 t, build_int_cst (TREE_TYPE (t), 0));
      stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
  else
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
		   PHI_ARG_DEF_FROM_EDGE (phi, e));
	}
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      if (fd->collapse > 1)
	{
	  e = find_edge (cont_bb, l1_bb);
	  remove_edge (e);
	  e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	{
	  e = find_edge (cont_bb, l1_bb);
	  e->flags = EDGE_TRUE_VALUE;
	}
      e->probability = REG_BR_PROB_BASE * 7 / 8;
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
			       recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
			       recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
			       recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
			       recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}
4094 /* A subroutine of expand_omp_for. Generate code for a parallel
4095 loop with static schedule and no specified chunk size. Given
4098 for (V = N1; V cond N2; V += STEP) BODY;
4100 where COND is "<" or ">", we generate pseudocode
4106 if ((__typeof (V)) -1 > 0 && cond is >)
4107 n = -(adj + N2 - N1) / -STEP;
4109 n = (adj + N2 - N1) / STEP;
4111 q += (q * nthreads != n);
4113 e0 = min(s0 + q, n);
4115 if (s0 >= e0) goto L2; else goto L0;
4121 if (V cond e) goto L1;
4126 expand_omp_for_static_nochunk (struct omp_region
*region
,
4127 struct omp_for_data
*fd
)
4129 tree n
, q
, s0
, e0
, e
, t
, nthreads
, threadid
;
4130 tree type
, itype
, vmain
, vback;
  basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
                     fold_build1 (NEGATE_EXPR, itype, t),
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, q, nthreads);
  t = fold_build2 (NE_EXPR, itype, t, n);
  t = fold_build2 (PLUS_EXPR, itype, q, t);
  q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (MULT_EXPR, itype, q, threadid);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
                     fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
}
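
/* A worked illustration of the partitioning above (hypothetical
   numbers, not taken from any test case): for N1 = 0, N2 = 10,
   STEP = 1, COND = "<" and nthreads = 4, the generated code computes
   n = 10 and q = 10/4 rounded up to 3 (because 2 * 4 != 10), so
   thread i receives the range [3*i, min (3*i + 3, 10)): threads 0-2
   run three iterations each, thread 3 runs the remaining one, and a
   thread whose s0 >= e0 branches straight to FIN_BB.  */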
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min (s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
  */

static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
              == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
                     fold_build1 (NEGATE_EXPR, itype, t),
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                true, GSI_SAME_STMT);

  trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                      true, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
                     fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
	 remove arguments of the phi nodes in fin_bb.  We need to create
	 appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
           gsi_next (&psi), ++i)
	{
	  gimple nphi;
	  source_location locus;

	  phi = gsi_stmt (psi);
	  t = gimple_phi_result (phi);
	  gcc_assert (t == redirect_edge_var_map_result (vm));
	  nphi = create_phi_node (t, iter_part_bb);
	  SSA_NAME_DEF_STMT (t) = nphi;

	  t = PHI_ARG_DEF_FROM_EDGE (phi, se);
	  locus = gimple_phi_arg_location_from_edge (phi, se);

	  /* A special case -- fd->loop.v is not yet computed in
	     iter_part_bb, we need to use v_extra instead.  */
	  if (t == fd->loop.v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene, locus);
	  locus = redirect_edge_var_map_location (vm);
	  add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
	}
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
      redirect_edge_var_map_clear (re);
      while (1)
	{
	  psi = gsi_start_phis (fin_bb);
	  if (gsi_end_p (psi))
	    break;
	  remove_phi_node (&psi, false);
	}

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
                   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
                   UNKNOWN_LOCATION);
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
                           recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
                           recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
}
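
/* A worked illustration of the trip-based partitioning above
   (hypothetical numbers): with n = 10, nthreads = 2 and CHUNK = 2,
   thread 0 computes s0 = (trip * 2 + 0) * 2 = 0, 4, 8 and thread 1
   computes s0 = (trip * 2 + 1) * 2 = 2, 6, 10, so chunks [0,2),
   [4,6) and [8,10) go to thread 0, chunks [2,4) and [6,8) go to
   thread 1, and thread 1 leaves the loop when its third s0 fails
   the s0 < n test.  */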
/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
              * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
		  ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
      next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		      - BUILT_IN_GOMP_LOOP_STATIC_START;
	  next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		     - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}
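
/* The index arithmetic above leans on the GOMP loop builtins being
   declared in schedule order; illustratively (assuming the ordering
   implied by the offsets used here), the lowering picks

     schedule(static, c)  -> GOMP_loop_static_start/_next
     schedule(dynamic, c) -> GOMP_loop_dynamic_start/_next
     schedule(guided, c)  -> GOMP_loop_guided_start/_next
     schedule(runtime)    -> GOMP_loop_runtime_start/_next

   with the "ordered" variants four entries further along, and the
   _ull variants at a fixed offset for unsigned long long iteration
   variables.  */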
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */

static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
	l2 = gimple_block_label (l2_bb);
      else
	{
	  /* This can happen if there are reductions.  */
	  len = EDGE_COUNT (l0_bb->succs);
	  gcc_assert (len > 0);
	  e = EDGE_SUCC (l0_bb, len - 1);
	  si = gsi_last_bb (e->dest);
	  l2 = NULL_TREE;
	  if (gsi_end_p (si)
	      || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	    l2 = gimple_block_label (e->dest);
	  else
	    FOR_EACH_EDGE (e, ei, l0_bb->succs)
	      {
		si = gsi_last_bb (e->dest);
		if (gsi_end_p (si)
		    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
		  {
		    l2 = gimple_block_label (e->dest);
		    break;
		  }
	      }
	}
      default_bb = create_empty_bb (l1_bb->prev_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l2 = gimple_block_label (default_bb);
    }

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
			 exit_reachable ? len - 1 : len);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  i = 0;
  if (exit_reachable)
    {
      t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      VEC_quick_push (tree, label_vec, u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);

  stmt = gimple_build_switch_vec (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  VEC_free (tree, heap, label_vec);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
      si = gsi_last_bb (l2_bb);
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
      stmt = gimple_build_call (t, 0);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
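
/* For illustration, a hypothetical two-section construct

     #pragma omp sections
     {
       #pragma omp section
	 a ();
       #pragma omp section
	 b ();
     }

   becomes a loop around "switch (v) { case 0: goto L2; case 1:
   a (); goto L1; case 2: b (); goto L1; default: abort (); }",
   where v is produced by GOMP_sections_start (2) on entry and by
   GOMP_sections_next () on each later pass, so the runtime hands
   each section to exactly one thread of the team.  */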
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
			    tree addr, tree loaded_val,
			    tree stored_val, int index)
{
  enum built_in_function base;
  tree decl, itype, call;
  direct_optab optab;
  tree rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  switch (gimple_assign_rhs_code (stmt))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  decl = built_in_decls[base + index + 1];
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));

  if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
  call = build_call_expr_loc (loc,
			      decl, 2, addr,
			      fold_convert_loc (loc, itype, rhs));
  call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
      == CODE_FOR_nothing)
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
							   true), NULL);
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
	{
	  add_referenced_var (iaddr);
	  add_referenced_var (loadedi);
	  loadedi = make_ssa_name (loadedi, NULL);
	}
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial
    = force_gimple_operand_gsi (&si,
				build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
					iaddr,
					build_int_cst (TREE_TYPE (iaddr), 0)),
				true, NULL_TREE, true, GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  gimple stmt;
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
				build1 (VIEW_CONVERT_EXPR, itype,
					stored_val), true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
      if (gimple_in_ssa_p (cfun))
	add_referenced_var (old_vali);
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
	   (build2 (NE_EXPR, boolean_type_node,
		    new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
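
/* For illustration (hypothetical input): an update the fetch-op
   builtins cannot express, say

     #pragma omp atomic
     x *= 3;

   is expanded by the function above into the loop

     oldval = *addr;
   repeat:
     newval = oldval * 3;
     cur = __sync_val_compare_and_swap (addr, oldval, newval);
     if (cur != oldval) { oldval = cur; goto repeat; }

   and for a float x the values are first VIEW_CONVERTed to a
   same-sized integer type, so the comparison is done on bit patterns
   and behaves correctly even for NaNs and -0.0.  */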
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

				  GOMP_atomic_start ();
				  *addr = rhs;
				  GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;
*/

static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}
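
/* For illustration (hypothetical input): on a target providing
   neither the fetch-op builtins nor compare-and-swap for the
   operand's mode, an update like "x /= y" under #pragma omp atomic
   ends up bracketed by GOMP_atomic_start ()/GOMP_atomic_end (),
   i.e. serialized on the single runtime lock shared by all atomic
   directives.  */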
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb))
	    {
	      if (expand_omp_atomic_fetch_op (load_bb, addr,
					      loaded_val, stored_val, index))
		return;
	    }

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->inner)
	expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}

      else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	  ;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
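
/* For illustration (hypothetical input): given

     #pragma omp parallel
       #pragma omp for
	 ...

   the scan above produces a GIMPLE_OMP_PARALLEL region whose `inner'
   chain holds a GIMPLE_OMP_FOR region; the block ending in
   GIMPLE_OMP_CONTINUE becomes the FOR region's `cont', and each
   GIMPLE_OMP_RETURN closes the region it exits, restoring the
   enclosing region as the new parent.  */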
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}
/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}
/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}

struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body, body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx);

  tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;

  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      gimple_seq_add_stmt (&body, sec_start);

      lower_omp (gimple_omp_body (sec_start), sctx);
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
      gimple_omp_set_body (sec_start, NULL);

      if (i == len - 1)
	{
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
	  gimple_seq_add_seq (&body, l);
	  gimple_omp_section_set_last (sec_start);
	}

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_sections_clauses (stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
  gimple_omp_set_body (stmt, NULL);

  gsi_replace (gsi_p, new_stmt, true);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
			   1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  bind_body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, bind_body, block);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
}
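
/* For illustration: a directive "#pragma omp master body ();" is
   lowered by the function above to the equivalent of

     if (omp_get_thread_num () != 0) goto lab;
     body ();
   lab:

   (a sketch only; the condition is actually built as an EQ_EXPR
   against zero with the jump on the false arm).  */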
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   it requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_call_expr_loc (loc, unlock, 1,
				    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
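
/* For illustration: "#pragma omp critical (foo)" is lowered to calls
   bracketing the body,

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     body;
     GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the common symbol created above;
   making it TREE_PUBLIC/DECL_COMMON lets every translation unit that
   uses the same name share one lock.  An unnamed critical uses
   GOMP_critical_start/_end instead.  */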
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->loop.n2, 0)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
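
/* For illustration (hypothetical loop): with

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++) ...

   the step is the constant 1, so the gating condition built above is
   the strict form (i == n) rather than (i >= n); only the thread
   whose private iterator ends exactly at n copies its value of x
   back out.  */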
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
  lower_omp (gimple_omp_body (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
  gsi_replace (gsi_p, new_stmt, true);
}
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
                         bool *handled_ops_p,
                         struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}
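/* For illustration: walking the body of

        #pragma omp parallel
        #pragma omp for
        for (...) ...

   visits only the GIMPLE_OMP_FOR, leaving *INFO at 1, and the caller
   then marks the parallel as combined; any additional statement in the
   body drives *INFO to -1.  */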
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);

  return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
                     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}
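/* For illustration: assuming a task record with a field for a VLA such
   as "char buf[n]", that field's DECL_SIZE refers to size temporaries
   of the function that encountered the task; the walk_tree calls above
   rewrite such references through TCCTX->cb.decl_map so the remapped
   type is self-contained in the generated copy function.  */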
/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        record_needs_remap = true;
        break;
      }
  for (f = TYPE_FIELDS (srecord_type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        srecord_needs_remap = true;
        break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
        record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
        srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree *p;

          decl = OMP_CLAUSE_DECL (c);
          p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
          if (p == NULL)
            continue;
          n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
          sf = (tree) n->value;
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
          src = build_simple_mem_ref_loc (loc, sarg);
          src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
          t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
          append_to_statement_list (t, &list);
        }

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        sf = (tree) n->value;
        if (tcctx.cb.decl_map)
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
        src = build_simple_mem_ref_loc (loc, sarg);
        src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_FIRSTPRIVATE:
        decl = OMP_CLAUSE_DECL (c);
        if (is_variable_sized (decl))
          break;
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
            src = build_simple_mem_ref_loc (loc, sarg);
            src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
            if (use_pointer_for_field (decl, NULL) || is_reference (decl))
              src = build_simple_mem_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
        t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_PRIVATE:
        if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
          break;
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
            src = build_simple_mem_ref_loc (loc, sarg);
            src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
            if (use_pointer_for_field (decl, NULL))
              src = build_simple_mem_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_simple_mem_ref_loc (loc, arg);
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      default:
        break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree ind, ptr, df;

          decl = OMP_CLAUSE_DECL (c);
          if (!is_variable_sized (decl))
            continue;
          n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
          if (n == NULL)
            continue;
          f = (tree) n->value;
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
          gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
          ind = DECL_VALUE_EXPR (decl);
          gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
          gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
          n = splay_tree_lookup (ctx->sfield_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          sf = (tree) n->value;
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
          src = build_simple_mem_ref_loc (loc, sarg);
          src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
          src = build_simple_mem_ref_loc (loc, src);
          dst = build_simple_mem_ref_loc (loc, arg);
          dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
          t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
          append_to_statement_list (t, &list);
          n = splay_tree_lookup (ctx->field_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          df = (tree) n->value;
          df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
          ptr = build_simple_mem_ref_loc (loc, arg);
          ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
          t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
                      build_fold_addr_expr_loc (loc, dst));
          append_to_statement_list (t, &list);
        }

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}
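/* For illustration: assuming

        char buf[n];
        #pragma omp task firstprivate(buf)

   the VLA pass above copy-constructs the array contents through the
   remapped fields and then stores the address of the destination copy
   into the field backing BUF's DECL_VALUE_EXPR, so the task body sees
   its own copy.  */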
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
        gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
        = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
                          : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
                           gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
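/* For illustration: assuming

        #pragma omp parallel shared(x) firstprivate(y)

   the sender variable .omp_data_o created above is filled in by the
   send clauses in ILIST, its address becomes the data argument of the
   GIMPLE_OMP_PARALLEL, and NEW_BODY starts by initializing the child
   function's receiver decl from it.  */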
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
                        void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
          && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
                         ctx ? NULL : &wi, NULL)
              || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
                            ctx ? NULL : &wi, NULL)))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
          && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
                        lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
          && walk_gimple_op (stmt, lower_omp_regimplify_p,
                             ctx ? NULL : &wi))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}
static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi = gsi_start (body);
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}
/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
        push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
        pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}
struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;
/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
               gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */

  bool exit_p = true;

  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
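/* For illustration: a branch such as

        #pragma omp single
          {
            goto fail;
          }
        fail:;

   arrives here with the GIMPLE_OMP_SINGLE as BRANCH_CTX and a NULL
   LABEL_CTX, so the generic "invalid branch" error is emitted and the
   offending statement is replaced by a nop.  */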
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
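/* For illustration: a label declared inside "#pragma omp master" is
   recorded above with the GIMPLE_OMP_MASTER statement as its context,
   while a label in the plain function body is recorded with a NULL
   context; pass 2 then compares each branch's context against the
   context stored for its destination label.  */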
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
        tree lab = gimple_cond_true_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
        lab = gimple_cond_false_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
      }
      break;

    case GIMPLE_GOTO:
      {
        tree lab = gimple_goto_dest (stmt);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
        unsigned int i;
        for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
          {
            tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
              break;
          }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}
struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"